diff --git a/go.mod b/go.mod
index 0d224fee05..570d1235dd 100644
--- a/go.mod
+++ b/go.mod
@@ -1,8 +1,6 @@
module github.com/loft-sh/devspace
-go 1.24
-
-toolchain go1.24.4
+go 1.25.0
require (
github.com/AlecAivazis/survey/v2 v2.3.2
@@ -41,8 +39,8 @@ require (
github.com/moby/patternmatcher v0.6.0
github.com/moby/term v0.0.0-20221205130635-1aeaba878587
github.com/olekukonko/tablewriter v0.0.5
- github.com/onsi/ginkgo/v2 v2.13.0
- github.com/onsi/gomega v1.29.0
+ github.com/onsi/ginkgo/v2 v2.27.2
+ github.com/onsi/gomega v1.38.2
github.com/otiai10/copy v1.7.0
github.com/pkg/errors v0.9.1
github.com/pkg/sftp v1.13.1
@@ -52,29 +50,30 @@ require (
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.10
github.com/vmware-labs/yaml-jsonpath v0.3.2
- golang.org/x/crypto v0.40.0
- golang.org/x/net v0.42.0
- golang.org/x/text v0.27.0
+ golang.org/x/crypto v0.44.0
+ golang.org/x/net v0.47.0
+ golang.org/x/text v0.31.0
google.golang.org/grpc v1.74.2
- google.golang.org/protobuf v1.36.6
+ google.golang.org/protobuf v1.36.8
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/src-d/go-git.v4 v4.13.1
gopkg.in/yaml.v3 v3.0.1
gotest.tools v2.2.0+incompatible
k8s.io/api v0.29.0
- k8s.io/apimachinery v0.29.0
+ k8s.io/apimachinery v0.35.0
k8s.io/client-go v0.29.0
k8s.io/klog v1.0.0
- k8s.io/klog/v2 v2.110.1
+ k8s.io/klog/v2 v2.130.1
k8s.io/kubectl v0.29.0
mvdan.cc/sh/v3 v3.5.1
- sigs.k8s.io/yaml v1.4.0
+ sigs.k8s.io/yaml v1.6.0
)
require (
cloud.google.com/go/compute/metadata v0.7.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
+ github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
github.com/beorn7/perks v1.0.1 // indirect
@@ -100,24 +99,24 @@ require (
github.com/emirpasic/gods v1.12.0 // indirect
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-openapi/jsonpointer v0.19.6 // indirect
+ github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
- github.com/go-openapi/swag v0.22.3 // indirect
- github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+ github.com/go-openapi/swag v0.23.0 // indirect
+ github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/googleapis v1.4.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.0.1 // indirect
- github.com/google/gnostic-models v0.6.8 // indirect
+ github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/go-github/v30 v30.1.0 // indirect
github.com/google/go-querystring v1.0.0 // indirect
- github.com/google/gofuzz v1.2.0 // indirect
- github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
+ github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
@@ -145,7 +144,7 @@ require (
github.com/mitchellh/mapstructure v1.4.3 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/go-archive v0.1.0 // indirect
- github.com/moby/spdystream v0.2.0 // indirect
+ github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/sys/atomicwriter v0.1.0 // indirect
github.com/moby/sys/capability v0.4.0 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
@@ -155,7 +154,7 @@ require (
github.com/moby/sys/user v0.4.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@@ -178,6 +177,7 @@ require (
github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f // indirect
github.com/ulikunitz/xz v0.5.14 // indirect
github.com/vbatts/tar-split v0.12.1 // indirect
+ github.com/x448/float16 v0.8.4 // indirect
github.com/xanzy/ssh-agent v0.2.1 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
@@ -193,12 +193,15 @@ require (
go.opentelemetry.io/otel/trace v1.36.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
+ go.yaml.in/yaml/v2 v2.4.3 // indirect
+ go.yaml.in/yaml/v3 v3.0.4 // indirect
+ golang.org/x/mod v0.29.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
- golang.org/x/sync v0.16.0 // indirect
- golang.org/x/sys v0.34.0 // indirect
- golang.org/x/term v0.33.0 // indirect
- golang.org/x/time v0.3.0 // indirect
- golang.org/x/tools v0.34.0 // indirect
+ golang.org/x/sync v0.18.0 // indirect
+ golang.org/x/sys v0.38.0 // indirect
+ golang.org/x/term v0.37.0 // indirect
+ golang.org/x/time v0.9.0 // indirect
+ golang.org/x/tools v0.38.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
@@ -207,12 +210,14 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/cli-runtime v0.29.0 // indirect
k8s.io/component-base v0.29.0 // indirect
- k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
- k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+ k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
+ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
+ sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
+ sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+ sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)
replace github.com/distribution/reference => github.com/distribution/reference v0.5.0
diff --git a/go.sum b/go.sum
index 5930af4bf0..6a73798288 100644
--- a/go.sum
+++ b/go.sum
@@ -15,6 +15,8 @@ github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.13.0 h1:/BcXOiS6Qi7N9XqUcv27vkIuVOkBEcWstd2pMlWSeaA=
@@ -166,9 +168,17 @@ github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fujiwara/shapeio v1.0.0 h1:xG5D9oNqCSUUbryZ/jQV3cqe1v2suEjwPIcEg1gKM8M=
github.com/fujiwara/shapeio v1.0.0/go.mod h1:LmEmu6L/8jetyj1oewewFb7bZCNRwE7wLCUNzDLaLVA=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/gertd/go-pluralize v0.2.0 h1:VzWNnxkUo3wkW2Nmp+3ieHSTQQ0LBHeSVxlKsQPQ+UY=
github.com/gertd/go-pluralize v0.2.0/go.mod h1:4ouO1Ndf/r7sZMorwp4Sbfw80lUni+sd+o3qJR8L9To=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs=
+github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
+github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
+github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
+github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE=
+github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
@@ -180,23 +190,26 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
+github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
@@ -229,8 +242,8 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6
github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -251,8 +264,8 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -261,7 +274,6 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
@@ -277,7 +289,6 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 h1:i462o439ZjprVSFSZLZxcsoAe592sZB1rci2Z8j4wdk=
github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY=
@@ -298,6 +309,8 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE=
+github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -346,6 +359,8 @@ github.com/loft-sh/utils v0.0.16/go.mod h1:n2L3X4i7d8kb2NF+q5duKa41N+N6fBde6XY2A
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
+github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
@@ -360,6 +375,8 @@ github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lL
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE=
+github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -380,8 +397,8 @@ github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
-github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
-github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
+github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk=
@@ -405,8 +422,9 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
@@ -422,12 +440,12 @@ github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.2 h1:uqH7bpe+ERSiDa34FDOF7RikN6RzXgduUF8yarlZp94=
github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
-github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
+github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
+github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
-github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
+github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
@@ -483,8 +501,8 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94 h1:G04eS0JkAIVZfaJLjla9dNxkJCPiKIGZlw9AfOhzOD0=
@@ -527,17 +545,24 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/syncthing/notify v0.0.0-20250528144937-c7027d4f7465 h1:yhxdTGmFkAM2TFA65c3NgGwpnIkUM8oVqPX2e9S7IVg=
github.com/syncthing/notify v0.0.0-20250528144937-c7027d4f7465/go.mod h1:J0q59IWjLtpRIJulohwqEZvjzwOfTEPp8SVhDJl+y0Y=
github.com/tcnksm/go-gitconfig v0.1.2 h1:iiDhRitByXAEyjgBqsKi9QU4o2TNtv9kPP3RgPgXBPw=
github.com/tcnksm/go-gitconfig v0.1.2/go.mod h1:/8EhP4H7oJZdIPyT+/UIsG87kTzrzM4UsLGSItWYCpE=
+github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
+github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
+github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
+github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
+github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa h1:XOFp/3aBXlqmOFAg3r6e0qQjPnK5I970LilqX+Is1W8=
github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa/go.mod h1:AvLEd1LEIl64G2Jpgwo7aVV5lGH0ePcKl0ygGIHNYl8=
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0=
@@ -551,6 +576,8 @@ github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnn
github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
github.com/vmware-labs/yaml-jsonpath v0.3.2 h1:/5QKeCBGdsInyDCyVNLbXyilb61MXGi9NP674f9Hobk=
github.com/vmware-labs/yaml-jsonpath v0.3.2/go.mod h1:U6whw1z03QyqgWdgXxvVnQ90zN1BWz5V+51Ewf8k+rQ=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
@@ -600,6 +627,10 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -611,14 +642,16 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
-golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
+golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
+golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
+golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -638,8 +671,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
-golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
-golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -652,8 +685,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
-golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -667,7 +700,6 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -681,27 +713,27 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
-golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
-golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
-golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
-golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
+golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -711,8 +743,8 @@ golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDq
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
-golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
+golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
+golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -753,8 +785,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
-google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
+google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -798,8 +830,8 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A=
k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA=
-k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o=
-k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis=
+k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
+k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/cli-runtime v0.29.0 h1:q2kC3cex4rOBLfPOnMSzV2BIrrQlx97gxHJs21KxKS4=
k8s.io/cli-runtime v0.29.0/go.mod h1:VKudXp3X7wR45L+nER85YUzOQIru28HQpXr0mTdeCrk=
k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8=
@@ -808,23 +840,27 @@ k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s=
k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
-k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
-k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
-k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI=
k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
mvdan.cc/sh/v3 v3.5.1 h1:hmP3UOw4f+EYexsJjFxvU38+kn+V/s2CclXHanIBkmQ=
mvdan.cc/sh/v3 v3.5.1/go.mod h1:1JcoyAKm1lZw/2bZje/iYKWicU/KMd0rsyJeKHnsK4E=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0=
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY=
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U=
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
-sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore
new file mode 100644
index 0000000000..6b061e6174
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/.gitignore
@@ -0,0 +1 @@
+_fuzz/
\ No newline at end of file
diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml
new file mode 100644
index 0000000000..fbc6332592
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml
@@ -0,0 +1,27 @@
+run:
+ deadline: 2m
+
+linters:
+ disable-all: true
+ enable:
+ - misspell
+ - govet
+ - staticcheck
+ - errcheck
+ - unparam
+ - ineffassign
+ - nakedret
+ - gocyclo
+ - dupl
+ - goimports
+ - revive
+ - gosec
+ - gosimple
+ - typecheck
+ - unused
+
+linters-settings:
+ gofmt:
+ simplify: true
+ dupl:
+ threshold: 600
diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
new file mode 100644
index 0000000000..fabe5e43dc
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
@@ -0,0 +1,268 @@
+# Changelog
+
+## 3.4.0 (2025-06-27)
+
+### Added
+
+- #268: Added property to Constraints to include prereleases for Check and Validate
+
+### Changed
+
+- #263: Updated Go testing for 1.24, 1.23, and 1.22
+- #269: Updated the error message handling for message case and wrapping errors
+- #266: Restore the ability to have leading 0's when parsing with NewVersion.
+ Opt-out of this by setting CoerceNewVersion to false.
+
+### Fixed
+
+- #257: Fixed the CodeQL link (thanks @dmitris)
+- #262: Restored detailed errors when failed to parse with NewVersion. Opt-out
+ of this by setting DetailedNewVersionErrors to false for faster performance.
+- #267: Handle pre-releases for an "and" group if one constraint includes them
+
+## 3.3.1 (2024-11-19)
+
+### Fixed
+
+- #253: Fix for allowing some version that were invalid
+
+## 3.3.0 (2024-08-27)
+
+### Added
+
+- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser)
+- #213: nil version equality checking (thanks @KnutZuidema)
+
+### Changed
+
+- #241: Simplify StrictNewVersion parsing (thanks @grosser)
+- Testing support up through Go 1.23
+- Minimum version set to 1.21 as this is what's tested now
+- Fuzz testing now supports caching
+
+## 3.2.1 (2023-04-10)
+
+### Changed
+
+- #198: Improved testing around pre-release names
+- #200: Improved code scanning with addition of CodeQL
+- #201: Testing now includes Go 1.20. Go 1.17 has been dropped
+- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily
+- #203: Docs updated for security details
+
+### Fixed
+
+- #199: Fixed issue with range transformations
+
+## 3.2.0 (2022-11-28)
+
+### Added
+
+- #190: Added text marshaling and unmarshaling
+- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg)
+- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker)
+- #179: Added New() version constructor (thanks @kazhuravlev)
+
+### Changed
+
+- #182/#183: Updated CI testing setup
+
+### Fixed
+
+- #186: Fixing issue where validation of constraint section gave false positives
+- #176: Fix constraints check with *-0 (thanks @mtt0)
+- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni)
+- #161: Fixed godoc (thanks @afirth)
+
+## 3.1.1 (2020-11-23)
+
+### Fixed
+
+- #158: Fixed issue with generated regex operation order that could cause problem
+
+## 3.1.0 (2020-04-15)
+
+### Added
+
+- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah)
+
+### Changed
+
+- #148: More accurate validation messages on constraints
+
+## 3.0.3 (2019-12-13)
+
+### Fixed
+
+- #141: Fixed issue with <= comparison
+
+## 3.0.2 (2019-11-14)
+
+### Fixed
+
+- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos)
+
+## 3.0.1 (2019-09-13)
+
+### Fixed
+
+- #125: Fixes issue with module path for v3
+
+## 3.0.0 (2019-09-12)
+
+This is a major release of the semver package which includes API changes. The Go
+API is compatible with ^1. The Go API was not changed because many people are using
+`go get` without Go modules for their applications and API breaking changes cause
+errors which we have or would need to support.
+
+The changes in this release are the handling based on the data passed into the
+functions. These are described in the added and changed sections below.
+
+### Added
+
+- StrictNewVersion function. This is similar to NewVersion but will return an
+ error if the version passed in is not a strict semantic version. For example,
+ 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly
+ speaking semantic versions. This function is faster, performs fewer operations,
+ and uses fewer allocations than NewVersion.
+- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint.
+ The Makefile contains the operations used. For more information on you can start
+ on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing
+- Now using Go modules
+
+### Changed
+
+- NewVersion has proper prerelease and metadata validation with error messages
+ to signal an issue with either of them
+- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the
+ version is >=1 the ^ ranges works the same as v1. For major versions of 0 the
+ rules have changed. The minor version is treated as the stable version unless
+ a patch is specified and then it is equivalent to =. One difference from npm/js
+ is that prereleases there are only to a specific version (e.g. 1.2.3).
+ Prereleases here look over multiple versions and follow semantic version
+ ordering rules. This pattern now follows along with the expected and requested
+ handling of this packaged by numerous users.
+
+## 1.5.0 (2019-09-11)
+
+### Added
+
+- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c)
+
+### Changed
+
+- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil)
+- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil)
+- #72: Adding docs comment pointing to vert for a cli
+- #71: Update the docs on pre-release comparator handling
+- #89: Test with new go versions (thanks @thedevsaddam)
+- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll)
+
+### Fixed
+
+- #78: Fix unchecked error in example code (thanks @ravron)
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+- #97: Fixed copyright file for proper display on GitHub
+- #107: Fix handling prerelease when sorting alphanum and num
+- #109: Fixed where Validate sometimes returns wrong message on error
+
+## 1.4.2 (2018-04-10)
+
+### Changed
+
+- #72: Updated the docs to point to vert for a console appliaction
+- #71: Update the docs on pre-release comparator handling
+
+### Fixed
+
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+
+## 1.4.1 (2018-04-02)
+
+### Fixed
+
+- Fixed #64: Fix pre-release precedence issue (thanks @uudashr)
+
+## 1.4.0 (2017-10-04)
+
+### Changed
+
+- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill)
+
+## 1.3.1 (2017-07-10)
+
+### Fixed
+
+- Fixed #57: number comparisons in prerelease sometimes inaccurate
+
+## 1.3.0 (2017-05-02)
+
+### Added
+
+- #45: Added json (un)marshaling support (thanks @mh-cbon)
+- Stability marker. See https://masterminds.github.io/stability/
+
+### Fixed
+
+- #51: Fix handling of single digit tilde constraint (thanks @dgodd)
+
+### Changed
+
+- #55: The godoc icon moved from png to svg
+
+## 1.2.3 (2017-04-03)
+
+### Fixed
+
+- #46: Fixed 0.x.x and 0.0.x in constraints being treated as *
+
+## Release 1.2.2 (2016-12-13)
+
+### Fixed
+
+- #34: Fixed issue where hyphen range was not working with pre-release parsing.
+
+## Release 1.2.1 (2016-11-28)
+
+### Fixed
+
+- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha"
+ properly.
+
+## Release 1.2.0 (2016-11-04)
+
+### Added
+
+- #20: Added MustParse function for versions (thanks @adamreese)
+- #15: Added increment methods on versions (thanks @mh-cbon)
+
+### Fixed
+
+- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and
+ might not satisfy the intended compatibility. The change here ignores pre-releases
+ on constraint checks (e.g., ~ or ^) when a pre-release is not part of the
+ constraint. For example, `^1.2.3` will ignore pre-releases while
+ `^1.2.3-alpha` will include them.
+
+## Release 1.1.1 (2016-06-30)
+
+### Changed
+
+- Issue #9: Speed up version comparison performance (thanks @sdboyer)
+- Issue #8: Added benchmarks (thanks @sdboyer)
+- Updated Go Report Card URL to new location
+- Updated Readme to add code snippet formatting (thanks @mh-cbon)
+- Updating tagging to v[SemVer] structure for compatibility with other tools.
+
+## Release 1.1.0 (2016-03-11)
+
+- Issue #2: Implemented validation to provide reasons a versions failed a
+ constraint.
+
+## Release 1.0.1 (2015-12-31)
+
+- Fixed #1: * constraint failing on valid versions.
+
+## Release 1.0.0 (2015-10-20)
+
+- Initial release
diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
new file mode 100644
index 0000000000..9ff7da9c48
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (C) 2014-2019, Matt Butcher and Matt Farina
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile
new file mode 100644
index 0000000000..9ca87a2c79
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/Makefile
@@ -0,0 +1,31 @@
+GOPATH=$(shell go env GOPATH)
+GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint
+
+.PHONY: lint
+lint: $(GOLANGCI_LINT)
+ @echo "==> Linting codebase"
+ @$(GOLANGCI_LINT) run
+
+.PHONY: test
+test:
+ @echo "==> Running tests"
+ GO111MODULE=on go test -v
+
+.PHONY: test-cover
+test-cover:
+ @echo "==> Running Tests with coverage"
+ GO111MODULE=on go test -cover .
+
+.PHONY: fuzz
+fuzz:
+ @echo "==> Running Fuzz Tests"
+ go env GOCACHE
+ go test -fuzz=FuzzNewVersion -fuzztime=15s .
+ go test -fuzz=FuzzStrictNewVersion -fuzztime=15s .
+ go test -fuzz=FuzzNewConstraint -fuzztime=15s .
+
+$(GOLANGCI_LINT):
+ # Install golangci-lint. The configuration for it is in the .golangci.yml
+ # file in the root of the repository
+ echo ${GOPATH}
+ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2
diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md
new file mode 100644
index 0000000000..2f56c676a5
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/README.md
@@ -0,0 +1,274 @@
+# SemVer
+
+The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[](https://masterminds.github.io/stability/active.html)
+[](https://github.com/Masterminds/semver/actions)
+[](https://pkg.go.dev/github.com/Masterminds/semver/v3)
+[](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+## Package Versions
+
+Note, import `github.com/Masterminds/semver/v3` to use the latest version.
+
+There are three major versions fo the `semver` package.
+
+* 3.x.x is the stable and active version. This version is focused on constraint
+ compatibility for range handling in other tools from other languages. It has
+ a similar API to the v1 releases. The development of this version is on the master
+ branch. The documentation for this version is below.
+* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
+ no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
+ There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
+* 1.x.x is the original release. It is no longer maintained. You should use the
+ v3 release instead. You can read the documentation for the 1.x.x release
+ [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
+
+## Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+ v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. Getting the original string is useful if the semantic version was coerced
+into a valid form.
+
+There are package level variables that affect how `NewVersion` handles parsing.
+
+- `CoerceNewVersion` is `true` by default. When set to `true` it coerces non-compliant
+ versions into SemVer. For example, allowing a leading 0 in a major, minor, or patch
+ part. This enables the use of CalVer in versions even when not compliant with SemVer.
+ When set to `false` less coercion work is done.
+- `DetailedNewVersionErrors` provides more detailed errors. It only has an affect when
+ `CoerceNewVersion` is set to `false`. When `DetailedNewVersionErrors` is set to `true`
+ it can provide some more insight into why a version is invalid. Setting
+ `DetailedNewVersionErrors` to `false` is faster on performance but provides less
+ detailed error messages if a version fails to parse.
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+```go
+raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+vs := make([]*semver.Version, len(raw))
+for i, r := range raw {
+ v, err := semver.NewVersion(r)
+ if err != nil {
+ t.Errorf("Error parsing version: %s", err)
+ }
+
+ vs[i] = v
+}
+
+sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses `Constraints`. There are some important
+differences to notes between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+ and others it will follow the specification and always include pre-releases
+ within the comparison. It will provide an answer that is valid with the
+ comparison section of the spec at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+ different set of rules that are common for ranges with tools like npm/js
+ and Rust/Cargo. This includes considering pre-releases to be invalid if the
+ ranges does not include one. If you want to have it include pre-releases a
+ simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+ ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods or checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patters with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+c, err := semver.NewConstraint(">= 1.2.3")
+if err != nil {
+ // Handle constraint not being parsable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+ // Handle version not being parsable.
+}
+// Check if the version meets the constraints. The variable a will be true.
+a := c.Check(v)
+```
+
+### Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of space or comma separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
+
+### Working With Prerelease Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, pre-releases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification, pre-releases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer's comparisons using constraints without a pre-release comparator will skip
+pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases.
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
+spec. The lowest character is a `0` in ASCII sort order
+(see an [ASCII Table](http://www.asciitable.com/))
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
+
+The `Constraints` instance returned from `semver.NewConstraint()` has a property
+`IncludePrerelease` that, when set to true, will return prerelease versions when calls
+to `Check()` and `Validate()` are made.
+
+### Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphens ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's
+parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`.
+
+### Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
+### Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
+### Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts
+as the API stability level. This is useful when comparisons of API versions as a
+major change is API breaking. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+* `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+* `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+* `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+## Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+if err != nil {
+ // Handle constraint not being parseable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+ // Handle version not being parseable.
+}
+
+// Validate a version against a constraint.
+a, msgs := c.Validate(v)
+// a is false
+for _, m := range msgs {
+ fmt.Println(m)
+
+ // Loops over the errors which would read
+ // "1.3 is greater than 1.2.3"
+ // "1.3 is less than 1.4"
+}
+```
+
+## Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
+
+## Security
+
+Security is an important consideration for this project. The project currently
+uses the following tools to help discover security issues:
+
+* [CodeQL](https://codeql.github.com)
+* [gosec](https://github.com/securego/gosec)
+* Daily Fuzz testing
+
+If you believe you have found a security vulnerability you can privately disclose
+it through the [GitHub security page](https://github.com/Masterminds/semver/security).
diff --git a/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/vendor/github.com/Masterminds/semver/v3/SECURITY.md
new file mode 100644
index 0000000000..a30a66b1f7
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+The following versions of semver are currently supported:
+
+| Version | Supported |
+| ------- | ------------------ |
+| 3.x | :white_check_mark: |
+| 2.x | :x: |
+| 1.x | :x: |
+
+Fixes are only released for the latest minor version in the form of a patch release.
+
+## Reporting a Vulnerability
+
+You can privately disclose a vulnerability through GitHub's
+[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories)
+mechanism.
diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go
new file mode 100644
index 0000000000..a78235895f
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/collection.go
@@ -0,0 +1,24 @@
+package semver
+
+// Collection is a collection of Version instances and implements the sort
+// interface. See the sort package for more details.
+// https://golang.org/pkg/sort/
+type Collection []*Version
+
+// Len returns the length of a collection. The number of Version instances
+// on the slice.
+func (c Collection) Len() int {
+ return len(c)
+}
+
+// Less is needed for the sort interface to compare two Version objects on the
+// slice. If checks if one is less than the other.
+func (c Collection) Less(i, j int) bool {
+ return c[i].LessThan(c[j])
+}
+
+// Swap is needed for the sort interface to replace the Version objects
+// at two different positions in the slice.
+func (c Collection) Swap(i, j int) {
+ c[i], c[j] = c[j], c[i]
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go
new file mode 100644
index 0000000000..8b7a10f836
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/constraints.go
@@ -0,0 +1,601 @@
+package semver
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// Constraints is one or more constraint that a semantic version can be
+// checked against.
+type Constraints struct {
+ constraints [][]*constraint
+ containsPre []bool
+
+ // IncludePrerelease specifies if pre-releases should be included in
+ // the results. Note, if a constraint range has a prerelease than
+ // prereleases will be included for that AND group even if this is
+ // set to false.
+ IncludePrerelease bool
+}
+
+// NewConstraint returns a Constraints instance that a Version instance can
+// be checked against. If there is a parse error it will be returned.
+func NewConstraint(c string) (*Constraints, error) {
+
+ // Rewrite - ranges into a comparison operation.
+ c = rewriteRange(c)
+
+ ors := strings.Split(c, "||")
+ lenors := len(ors)
+ or := make([][]*constraint, lenors)
+ hasPre := make([]bool, lenors)
+ for k, v := range ors {
+ // Validate the segment
+ if !validConstraintRegex.MatchString(v) {
+ return nil, fmt.Errorf("improper constraint: %s", v)
+ }
+
+ cs := findConstraintRegex.FindAllString(v, -1)
+ if cs == nil {
+ cs = append(cs, v)
+ }
+ result := make([]*constraint, len(cs))
+ for i, s := range cs {
+ pc, err := parseConstraint(s)
+ if err != nil {
+ return nil, err
+ }
+
+ // If one of the constraints has a prerelease record this.
+ // This information is used when checking all in an "and"
+ // group to ensure they all check for prereleases.
+ if pc.con.pre != "" {
+ hasPre[k] = true
+ }
+
+ result[i] = pc
+ }
+ or[k] = result
+ }
+
+ o := &Constraints{
+ constraints: or,
+ containsPre: hasPre,
+ }
+ return o, nil
+}
+
+// Check tests if a version satisfies the constraints.
+func (cs Constraints) Check(v *Version) bool {
+ // TODO(mattfarina): For v4 of this library consolidate the Check and Validate
+ // functions as the underlying functions make that possible now.
+ // loop over the ORs and check the inner ANDs
+ for i, o := range cs.constraints {
+ joy := true
+ for _, c := range o {
+ if check, _ := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); !check {
+ joy = false
+ break
+ }
+ }
+
+ if joy {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Validate checks if a version satisfies a constraint. If not a slice of
+// reasons for the failure are returned in addition to a bool.
+func (cs Constraints) Validate(v *Version) (bool, []error) {
+ // loop over the ORs and check the inner ANDs
+ var e []error
+
+ // Capture the prerelease message only once. When it happens the first time
+ // this var is marked
+ var prerelesase bool
+ for i, o := range cs.constraints {
+ joy := true
+ for _, c := range o {
+ // Before running the check handle the case there the version is
+ // a prerelease and the check is not searching for prereleases.
+ if !(cs.IncludePrerelease || cs.containsPre[i]) && v.pre != "" {
+ if !prerelesase {
+ em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ e = append(e, em)
+ prerelesase = true
+ }
+ joy = false
+
+ } else {
+
+ if _, err := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); err != nil {
+ e = append(e, err)
+ joy = false
+ }
+ }
+ }
+
+ if joy {
+ return true, []error{}
+ }
+ }
+
+ return false, e
+}
+
+func (cs Constraints) String() string {
+ buf := make([]string, len(cs.constraints))
+ var tmp bytes.Buffer
+
+ for k, v := range cs.constraints {
+ tmp.Reset()
+ vlen := len(v)
+ for kk, c := range v {
+ tmp.WriteString(c.string())
+
+ // Space separate the AND conditions
+ if vlen > 1 && kk < vlen-1 {
+ tmp.WriteString(" ")
+ }
+ }
+ buf[k] = tmp.String()
+ }
+
+ return strings.Join(buf, " || ")
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (cs *Constraints) UnmarshalText(text []byte) error {
+ temp, err := NewConstraint(string(text))
+ if err != nil {
+ return err
+ }
+
+ *cs = *temp
+
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (cs Constraints) MarshalText() ([]byte, error) {
+ return []byte(cs.String()), nil
+}
+
+var constraintOps map[string]cfunc
+var constraintRegex *regexp.Regexp
+var constraintRangeRegex *regexp.Regexp
+
+// Used to find individual constraints within a multi-constraint string
+var findConstraintRegex *regexp.Regexp
+
+// Used to validate an segment of ANDs is valid
+var validConstraintRegex *regexp.Regexp
+
+const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
+ `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+ `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+func init() {
+ constraintOps = map[string]cfunc{
+ "": constraintTildeOrEqual,
+ "=": constraintTildeOrEqual,
+ "!=": constraintNotEqual,
+ ">": constraintGreaterThan,
+ "<": constraintLessThan,
+ ">=": constraintGreaterThanEqual,
+ "=>": constraintGreaterThanEqual,
+ "<=": constraintLessThanEqual,
+ "=<": constraintLessThanEqual,
+ "~": constraintTilde,
+ "~>": constraintTilde,
+ "^": constraintCaret,
+ }
+
+ ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^`
+
+ constraintRegex = regexp.MustCompile(fmt.Sprintf(
+ `^\s*(%s)\s*(%s)\s*$`,
+ ops,
+ cvRegex))
+
+ constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
+ `\s*(%s)\s+-\s+(%s)\s*`,
+ cvRegex, cvRegex))
+
+ findConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+ `(%s)\s*(%s)`,
+ ops,
+ cvRegex))
+
+ // The first time a constraint shows up will look slightly different from
+ // future times it shows up due to a leading space or comma in a given
+ // string.
+ validConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+ `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`,
+ ops,
+ cvRegex,
+ ops,
+ cvRegex))
+}
+
+// An individual constraint
+type constraint struct {
+ // The version used in the constraint check. For example, if a constraint
+ // is '<= 2.0.0' the con a version instance representing 2.0.0.
+ con *Version
+
+ // The original parsed version (e.g., 4.x from != 4.x)
+ orig string
+
+ // The original operator for the constraint
+ origfunc string
+
+ // When an x is used as part of the version (e.g., 1.x)
+ minorDirty bool
+ dirty bool
+ patchDirty bool
+}
+
+// Check if a version meets the constraint
+func (c *constraint) check(v *Version, includePre bool) (bool, error) {
+ return constraintOps[c.origfunc](v, c, includePre)
+}
+
+// String prints an individual constraint into a string
+func (c *constraint) string() string {
+ return c.origfunc + c.orig
+}
+
+type cfunc func(v *Version, c *constraint, includePre bool) (bool, error)
+
+func parseConstraint(c string) (*constraint, error) {
+ if len(c) > 0 {
+ m := constraintRegex.FindStringSubmatch(c)
+ if m == nil {
+ return nil, fmt.Errorf("improper constraint: %s", c)
+ }
+
+ cs := &constraint{
+ orig: m[2],
+ origfunc: m[1],
+ }
+
+ ver := m[2]
+ minorDirty := false
+ patchDirty := false
+ dirty := false
+ if isX(m[3]) || m[3] == "" {
+ ver = fmt.Sprintf("0.0.0%s", m[6])
+ dirty = true
+ } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" {
+ minorDirty = true
+ dirty = true
+ ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
+ } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" {
+ dirty = true
+ patchDirty = true
+ ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
+ }
+
+ con, err := NewVersion(ver)
+ if err != nil {
+
+ // The constraintRegex should catch any regex parsing errors. So,
+ // we should never get here.
+ return nil, errors.New("constraint parser error")
+ }
+
+ cs.con = con
+ cs.minorDirty = minorDirty
+ cs.patchDirty = patchDirty
+ cs.dirty = dirty
+
+ return cs, nil
+ }
+
+ // The rest is the special case where an empty string was passed in which
+ // is equivalent to * or >=0.0.0
+ con, err := StrictNewVersion("0.0.0")
+ if err != nil {
+
+ // The constraintRegex should catch any regex parsing errors. So,
+ // we should never get here.
+ return nil, errors.New("constraint parser error")
+ }
+
+ cs := &constraint{
+ con: con,
+ orig: c,
+ origfunc: "",
+ minorDirty: false,
+ patchDirty: false,
+ dirty: true,
+ }
+ return cs, nil
+}
+
+// Constraint functions
+func constraintNotEqual(v *Version, c *constraint, includePre bool) (bool, error) {
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ if c.dirty {
+ if c.con.Major() != v.Major() {
+ return true, nil
+ }
+ if c.con.Minor() != v.Minor() && !c.minorDirty {
+ return true, nil
+ } else if c.minorDirty {
+ return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+ } else if c.con.Patch() != v.Patch() && !c.patchDirty {
+ return true, nil
+ } else if c.patchDirty {
+ // Need to handle prereleases if present
+ if v.Prerelease() != "" || c.con.Prerelease() != "" {
+ eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+ }
+ return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+ }
+ }
+
+ eq := v.Equal(c.con)
+ if eq {
+ return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+ }
+
+ return true, nil
+}
+
+func constraintGreaterThan(v *Version, c *constraint, includePre bool) (bool, error) {
+
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ var eq bool
+
+ if !c.dirty {
+ eq = v.Compare(c.con) == 1
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+ }
+
+ if v.Major() > c.con.Major() {
+ return true, nil
+ } else if v.Major() < c.con.Major() {
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+ } else if c.minorDirty {
+ // This is a range case such as >11. When the version is something like
+ // 11.1.0 is it not > 11. For that we would need 12 or higher
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+ } else if c.patchDirty {
+ // This is for ranges such as >11.1. A version of 11.1.1 is not greater
+ // which one of 11.2.1 is greater
+ eq = v.Minor() > c.con.Minor()
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+ }
+
+ // If we have gotten here we are not comparing pre-preleases and can use the
+ // Compare function to accomplish that.
+ eq = v.Compare(c.con) == 1
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+}
+
+func constraintLessThan(v *Version, c *constraint, includePre bool) (bool, error) {
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ eq := v.Compare(c.con) < 0
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig)
+}
+
+func constraintGreaterThanEqual(v *Version, c *constraint, includePre bool) (bool, error) {
+
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ eq := v.Compare(c.con) >= 0
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is less than %s", v, c.orig)
+}
+
+func constraintLessThanEqual(v *Version, c *constraint, includePre bool) (bool, error) {
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ var eq bool
+
+ if !c.dirty {
+ eq = v.Compare(c.con) <= 0
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+ }
+
+ if v.Major() > c.con.Major() {
+ return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+ } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty {
+ return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+ }
+
+ return true, nil
+}
+
+// ~*, ~>* --> >= 0.0.0 (any)
+// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0
+// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
+// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
+// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
+// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
+func constraintTilde(v *Version, c *constraint, includePre bool) (bool, error) {
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ if v.LessThan(c.con) {
+ return false, fmt.Errorf("%s is less than %s", v, c.orig)
+ }
+
+ // ~0.0.0 is a special case where all constraints are accepted. It's
+ // equivalent to >= 0.0.0.
+ if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 &&
+ !c.minorDirty && !c.patchDirty {
+ return true, nil
+ }
+
+ if v.Major() != c.con.Major() {
+ return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+ }
+
+ if v.Minor() != c.con.Minor() && !c.minorDirty {
+ return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig)
+ }
+
+ return true, nil
+}
+
+// When there is a .x (dirty) status it automatically opts in to ~. Otherwise
+// it's a straight =
+func constraintTildeOrEqual(v *Version, c *constraint, includePre bool) (bool, error) {
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ if c.dirty {
+ return constraintTilde(v, c, includePre)
+ }
+
+ eq := v.Equal(c.con)
+ if eq {
+ return true, nil
+ }
+
+ return false, fmt.Errorf("%s is not equal to %s", v, c.orig)
+}
+
+// ^* --> (any)
+// ^1.2.3 --> >=1.2.3 <2.0.0
+// ^1.2 --> >=1.2.0 <2.0.0
+// ^1 --> >=1.0.0 <2.0.0
+// ^0.2.3 --> >=0.2.3 <0.3.0
+// ^0.2 --> >=0.2.0 <0.3.0
+// ^0.0.3 --> >=0.0.3 <0.0.4
+// ^0.0 --> >=0.0.0 <0.1.0
+// ^0 --> >=0.0.0 <1.0.0
+func constraintCaret(v *Version, c *constraint, includePre bool) (bool, error) {
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ // This less than handles prereleases
+ if v.LessThan(c.con) {
+ return false, fmt.Errorf("%s is less than %s", v, c.orig)
+ }
+
+ var eq bool
+
+ // ^ when the major > 0 is >=x.y.z < x+1
+ if c.con.Major() > 0 || c.minorDirty {
+
+ // ^ has to be within a major range for > 0. Everything less than was
+ // filtered out with the LessThan call above. This filters out those
+ // that greater but not within the same major range.
+ eq = v.Major() == c.con.Major()
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+ }
+
+ // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1
+ if c.con.Major() == 0 && v.Major() > 0 {
+ return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+ }
+ // If the con Minor is > 0 it is not dirty
+ if c.con.Minor() > 0 || c.patchDirty {
+ eq = v.Minor() == c.con.Minor()
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig)
+ }
+ // ^ when the minor is 0 and minor > 0 is =0.0.z
+ if c.con.Minor() == 0 && v.Minor() > 0 {
+ return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig)
+ }
+
+ // At this point the major is 0 and the minor is 0 and not dirty. The patch
+ // is not dirty so we need to check if they are equal. If they are not equal
+ eq = c.con.Patch() == v.Patch()
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig)
+}
+
+func isX(x string) bool {
+ switch x {
+ case "x", "*", "X":
+ return true
+ default:
+ return false
+ }
+}
+
+func rewriteRange(i string) string {
+ m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+ if m == nil {
+ return i
+ }
+ o := i
+ for _, v := range m {
+ t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11])
+ o = strings.Replace(o, v[0], t, 1)
+ }
+
+ return o
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go
new file mode 100644
index 0000000000..74f97caa57
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/doc.go
@@ -0,0 +1,184 @@
+/*
+Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
+
+Specifically it provides the ability to:
+
+ - Parse semantic versions
+ - Sort semantic versions
+ - Check if a semantic version fits within a set of constraints
+ - Optionally work with a `v` prefix
+
+# Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an optional error can be returned if there is an issue
+parsing the version. For example,
+
+ v, err := semver.NewVersion("1.2.3-beta.1+b345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. For more details please see the documentation
+at https://godoc.org/github.com/Masterminds/semver.
+
+# Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+ raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+ vs := make([]*semver.Version, len(raw))
+ for i, r := range raw {
+ v, err := semver.NewVersion(r)
+ if err != nil {
+ t.Errorf("Error parsing version: %s", err)
+ }
+
+ vs[i] = v
+ }
+
+ sort.Sort(semver.Collection(vs))
+
+# Checking Version Constraints and Comparing Versions
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other is using Constraints. There are some important
+differences to note between these two methods of comparison.
+
+ 1. When two versions are compared using functions such as `Compare`, `LessThan`,
+ and others it will follow the specification and always include prereleases
+ within the comparison. It will provide an answer valid with the comparison
+ spec section at https://semver.org/#spec-item-11
+ 2. When constraint checking is used for checks or validation it will follow a
+ different set of rules that are common for ranges with tools like npm/js
+ and Rust/Cargo. This includes considering prereleases to be invalid if the
+    range does not include one. If you want to have it include pre-releases a
+ simple solution is to include `-0` in your range.
+ 3. Constraint ranges can have some complex rules including the shorthand use of
+ ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods or checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+ c, err := semver.NewConstraint(">= 1.2.3")
+ if err != nil {
+ // Handle constraint not being parsable.
+ }
+
+ v, err := semver.NewVersion("1.3")
+ if err != nil {
+ // Handle version not being parsable.
+ }
+ // Check if the version meets the constraints. The a variable will be true.
+ a := c.Check(v)
+
+# Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. This can also be written as
+`">= 1.2, < 3.0.0 || >= 4.2.3"`
+
+The basic comparisons are:
+
+ - `=`: equal (aliased to no operator)
+ - `!=`: not equal
+ - `>`: greater than
+ - `<`: less than
+ - `>=`: greater than or equal to
+ - `<=`: less than or equal to
+
+# Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphens ranges.
+These look like:
+
+ - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+ - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+# Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the tilde operation. For example,
+
+ - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+ - `>= 1.2.x` is equivalent to `>= 1.2.0`
+ - `<= 2.x` is equivalent to `<= 3`
+ - `*` is equivalent to `>= 0.0.0`
+
+Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+ - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
+ - `~1` is equivalent to `>= 1, < 2`
+ - `~2.3` is equivalent to `>= 2.3 < 2.4`
+ - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+ - `~1.x` is equivalent to `>= 1 < 2`
+
+Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts
+as the API stability level. This is useful when comparing API versions as a
+major change is API breaking. For example,
+
+ - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+ - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+ - `^2.3` is equivalent to `>= 2.3, < 3`
+ - `^2.x` is equivalent to `>= 2.0.0, < 3`
+ - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+ - `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+ - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+ - `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+ - `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+ c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+ if err != nil {
+ // Handle constraint not being parseable.
+ }
+
+	v, err := semver.NewVersion("1.3")
+ if err != nil {
+ // Handle version not being parseable.
+ }
+
+ // Validate a version against a constraint.
+ a, msgs := c.Validate(v)
+ // a is false
+ for _, m := range msgs {
+ fmt.Println(m)
+
+ // Loops over the errors which would read
+ // "1.3 is greater than 1.2.3"
+ // "1.3 is less than 1.4"
+ }
+*/
+package semver
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
new file mode 100644
index 0000000000..7a3ba73887
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -0,0 +1,788 @@
+package semver
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+var looseVersionRegex *regexp.Regexp
+
+// CoerceNewVersion sets if leading 0's are allowed in the version part. Leading 0's are
+// not allowed in a valid semantic version. When set to true, NewVersion will coerce
+// leading 0's into a valid version.
+var CoerceNewVersion = true
+
+// DetailedNewVersionErrors specifies if detailed errors are returned from the NewVersion
+// function. This is used when CoerceNewVersion is set to false. If set to false
+// ErrInvalidSemVer is returned for an invalid version. This does not apply to
+// StrictNewVersion. Setting this variable to false returns errors more quickly.
+var DetailedNewVersionErrors = true
+
+var (
+ // ErrInvalidSemVer is returned when a version is found to be invalid when
+ // being parsed.
+ ErrInvalidSemVer = errors.New("invalid semantic version")
+
+ // ErrEmptyString is returned when an empty string is passed in for parsing.
+ ErrEmptyString = errors.New("version string empty")
+
+ // ErrInvalidCharacters is returned when invalid characters are found as
+ // part of a version
+ ErrInvalidCharacters = errors.New("invalid characters in version")
+
+ // ErrSegmentStartsZero is returned when a version segment starts with 0.
+ // This is invalid in SemVer.
+ ErrSegmentStartsZero = errors.New("version segment starts with 0")
+
+ // ErrInvalidMetadata is returned when the metadata is an invalid format
+ ErrInvalidMetadata = errors.New("invalid metadata string")
+
+ // ErrInvalidPrerelease is returned when the pre-release is an invalid format
+ ErrInvalidPrerelease = errors.New("invalid prerelease string")
+)
+
+// semVerRegex is the regular expression used to parse a semantic version.
+// This is not the official regex from the semver spec. It has been modified to allow for loose handling
+// where versions like 2.1 are detected.
+const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
+ `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
+ `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`
+
+// looseSemVerRegex is a regular expression that lets invalid semver expressions through
+// with enough detail that certain errors can be checked for.
+const looseSemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
+ `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+ `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+// Version represents a single semantic version.
+type Version struct {
+ major, minor, patch uint64
+ pre string
+ metadata string
+ original string
+}
+
+func init() {
+ versionRegex = regexp.MustCompile("^" + semVerRegex + "$")
+ looseVersionRegex = regexp.MustCompile("^" + looseSemVerRegex + "$")
+}
+
+const (
+ num string = "0123456789"
+ allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num
+)
+
+// StrictNewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version. Only parses valid semantic versions.
+// Performs checking that can find errors within the version.
+// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x
+// releases of semver did, use the NewVersion() function.
+func StrictNewVersion(v string) (*Version, error) {
+ // Parsing here does not use RegEx in order to increase performance and reduce
+ // allocations.
+
+ if len(v) == 0 {
+ return nil, ErrEmptyString
+ }
+
+ // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build
+ parts := strings.SplitN(v, ".", 3)
+ if len(parts) != 3 {
+ return nil, ErrInvalidSemVer
+ }
+
+ sv := &Version{
+ original: v,
+ }
+
+ // Extract build metadata
+ if strings.Contains(parts[2], "+") {
+ extra := strings.SplitN(parts[2], "+", 2)
+ sv.metadata = extra[1]
+ parts[2] = extra[0]
+ if err := validateMetadata(sv.metadata); err != nil {
+ return nil, err
+ }
+ }
+
+ // Extract build prerelease
+ if strings.Contains(parts[2], "-") {
+ extra := strings.SplitN(parts[2], "-", 2)
+ sv.pre = extra[1]
+ parts[2] = extra[0]
+ if err := validatePrerelease(sv.pre); err != nil {
+ return nil, err
+ }
+ }
+
+ // Validate the number segments are valid. This includes only having positive
+ // numbers and no leading 0's.
+ for _, p := range parts {
+ if !containsOnly(p, num) {
+ return nil, ErrInvalidCharacters
+ }
+
+ if len(p) > 1 && p[0] == '0' {
+ return nil, ErrSegmentStartsZero
+ }
+ }
+
+ // Extract major, minor, and patch
+ var err error
+ sv.major, err = strconv.ParseUint(parts[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ sv.minor, err = strconv.ParseUint(parts[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ sv.patch, err = strconv.ParseUint(parts[2], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return sv, nil
+}
+
+// NewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version. If the version is SemVer-ish it
+// attempts to convert it to SemVer. If you want to validate it was a strict
+// semantic version at parse time see StrictNewVersion().
+func NewVersion(v string) (*Version, error) {
+ if CoerceNewVersion {
+ return coerceNewVersion(v)
+ }
+ m := versionRegex.FindStringSubmatch(v)
+ if m == nil {
+
+ // Disabling detailed errors is first so that it is in the fast path.
+ if !DetailedNewVersionErrors {
+ return nil, ErrInvalidSemVer
+ }
+
+ // Check for specific errors with the semver string and return a more detailed
+ // error.
+ m = looseVersionRegex.FindStringSubmatch(v)
+ if m == nil {
+ return nil, ErrInvalidSemVer
+ }
+ err := validateVersion(m)
+ if err != nil {
+ return nil, err
+ }
+ return nil, ErrInvalidSemVer
+ }
+
+ sv := &Version{
+ metadata: m[5],
+ pre: m[4],
+ original: v,
+ }
+
+ var err error
+ sv.major, err = strconv.ParseUint(m[1], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing version segment: %w", err)
+ }
+
+ if m[2] != "" {
+ sv.minor, err = strconv.ParseUint(m[2], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing version segment: %w", err)
+ }
+ } else {
+ sv.minor = 0
+ }
+
+ if m[3] != "" {
+ sv.patch, err = strconv.ParseUint(m[3], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing version segment: %w", err)
+ }
+ } else {
+ sv.patch = 0
+ }
+
+ // Perform some basic due diligence on the extra parts to ensure they are
+ // valid.
+
+ if sv.pre != "" {
+ if err = validatePrerelease(sv.pre); err != nil {
+ return nil, err
+ }
+ }
+
+ if sv.metadata != "" {
+ if err = validateMetadata(sv.metadata); err != nil {
+ return nil, err
+ }
+ }
+
+ return sv, nil
+}
+
+func coerceNewVersion(v string) (*Version, error) {
+ m := looseVersionRegex.FindStringSubmatch(v)
+ if m == nil {
+ return nil, ErrInvalidSemVer
+ }
+
+ sv := &Version{
+ metadata: m[8],
+ pre: m[5],
+ original: v,
+ }
+
+ var err error
+ sv.major, err = strconv.ParseUint(m[1], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing version segment: %w", err)
+ }
+
+ if m[2] != "" {
+ sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing version segment: %w", err)
+ }
+ } else {
+ sv.minor = 0
+ }
+
+ if m[3] != "" {
+ sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing version segment: %w", err)
+ }
+ } else {
+ sv.patch = 0
+ }
+
+ // Perform some basic due diligence on the extra parts to ensure they are
+ // valid.
+
+ if sv.pre != "" {
+ if err = validatePrerelease(sv.pre); err != nil {
+ return nil, err
+ }
+ }
+
+ if sv.metadata != "" {
+ if err = validateMetadata(sv.metadata); err != nil {
+ return nil, err
+ }
+ }
+
+ return sv, nil
+}
+
+// New creates a new instance of Version with each of the parts passed in as
+// arguments instead of parsing a version string.
+func New(major, minor, patch uint64, pre, metadata string) *Version {
+ v := Version{
+ major: major,
+ minor: minor,
+ patch: patch,
+ pre: pre,
+ metadata: metadata,
+ original: "",
+ }
+
+ v.original = v.String()
+
+ return &v
+}
+
+// MustParse parses a given version and panics on error.
+func MustParse(v string) *Version {
+ sv, err := NewVersion(v)
+ if err != nil {
+ panic(err)
+ }
+ return sv
+}
+
+// String converts a Version object to a string.
+// Note, if the original version contained a leading v this version will not.
+// See the Original() method to retrieve the original value. Semantic Versions
+// don't contain a leading v per the spec. Instead it's optional on
+// implementation.
+func (v Version) String() string {
+ var buf bytes.Buffer
+
+ fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch)
+ if v.pre != "" {
+ fmt.Fprintf(&buf, "-%s", v.pre)
+ }
+ if v.metadata != "" {
+ fmt.Fprintf(&buf, "+%s", v.metadata)
+ }
+
+ return buf.String()
+}
+
+// Original returns the original value passed in to be parsed.
+func (v *Version) Original() string {
+ return v.original
+}
+
+// Major returns the major version.
+func (v Version) Major() uint64 {
+ return v.major
+}
+
+// Minor returns the minor version.
+func (v Version) Minor() uint64 {
+ return v.minor
+}
+
+// Patch returns the patch version.
+func (v Version) Patch() uint64 {
+ return v.patch
+}
+
+// Prerelease returns the pre-release version.
+func (v Version) Prerelease() string {
+ return v.pre
+}
+
+// Metadata returns the metadata on the version.
+func (v Version) Metadata() string {
+ return v.metadata
+}
+
+// originalVPrefix returns the original 'v' prefix if any.
+func (v Version) originalVPrefix() string {
+ // Note, only lowercase v is supported as a prefix by the parser.
+ if v.original != "" && v.original[:1] == "v" {
+ return v.original[:1]
+ }
+ return ""
+}
+
+// IncPatch produces the next patch version.
+// If the current version does not have prerelease/metadata information,
+// it unsets metadata and prerelease values, increments patch number.
+// If the current version has any of prerelease or metadata information,
+// it unsets both values and keeps current patch value
+func (v Version) IncPatch() Version {
+ vNext := v
+ // according to http://semver.org/#spec-item-9
+ // Pre-release versions have a lower precedence than the associated normal version.
+ // according to http://semver.org/#spec-item-10
+ // Build metadata SHOULD be ignored when determining version precedence.
+ if v.pre != "" {
+ vNext.metadata = ""
+ vNext.pre = ""
+ } else {
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = v.patch + 1
+ }
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
+
+// IncMinor produces the next minor version.
+// Sets patch to 0.
+// Increments minor number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMinor() Version {
+ vNext := v
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = 0
+ vNext.minor = v.minor + 1
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
+
+// IncMajor produces the next major version.
+// Sets patch to 0.
+// Sets minor to 0.
+// Increments major number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMajor() Version {
+ vNext := v
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = 0
+ vNext.minor = 0
+ vNext.major = v.major + 1
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
+
+// SetPrerelease defines the prerelease value.
+// Value must not include the required 'hyphen' prefix.
+func (v Version) SetPrerelease(prerelease string) (Version, error) {
+ vNext := v
+ if len(prerelease) > 0 {
+ if err := validatePrerelease(prerelease); err != nil {
+ return vNext, err
+ }
+ }
+ vNext.pre = prerelease
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext, nil
+}
+
+// SetMetadata defines metadata value.
+// Value must not include the required 'plus' prefix.
+func (v Version) SetMetadata(metadata string) (Version, error) {
+ vNext := v
+ if len(metadata) > 0 {
+ if err := validateMetadata(metadata); err != nil {
+ return vNext, err
+ }
+ }
+ vNext.metadata = metadata
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext, nil
+}
+
+// LessThan tests if one version is less than another one.
+func (v *Version) LessThan(o *Version) bool {
+ return v.Compare(o) < 0
+}
+
+// LessThanEqual tests if one version is less or equal than another one.
+func (v *Version) LessThanEqual(o *Version) bool {
+ return v.Compare(o) <= 0
+}
+
+// GreaterThan tests if one version is greater than another one.
+func (v *Version) GreaterThan(o *Version) bool {
+ return v.Compare(o) > 0
+}
+
+// GreaterThanEqual tests if one version is greater or equal than another one.
+func (v *Version) GreaterThanEqual(o *Version) bool {
+ return v.Compare(o) >= 0
+}
+
+// Equal tests if two versions are equal to each other.
+// Note, versions can be equal with different metadata since metadata
+// is not considered part of the comparable version.
+func (v *Version) Equal(o *Version) bool {
+ if v == o {
+ return true
+ }
+ if v == nil || o == nil {
+ return false
+ }
+ return v.Compare(o) == 0
+}
+
+// Compare compares this version to another one. It returns -1, 0, or 1 if
+// the version smaller, equal, or larger than the other version.
+//
+// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is
+// lower than the version without a prerelease. Compare always takes into account
+// prereleases. If you want to work with ranges using typical range syntaxes that
+// skip prereleases if the range is not looking for them use constraints.
+func (v *Version) Compare(o *Version) int {
+ // Compare the major, minor, and patch version for differences. If a
+ // difference is found return the comparison.
+ if d := compareSegment(v.Major(), o.Major()); d != 0 {
+ return d
+ }
+ if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
+ return d
+ }
+ if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
+ return d
+ }
+
+ // At this point the major, minor, and patch versions are the same.
+ ps := v.pre
+ po := o.Prerelease()
+
+ if ps == "" && po == "" {
+ return 0
+ }
+ if ps == "" {
+ return 1
+ }
+ if po == "" {
+ return -1
+ }
+
+ return comparePrerelease(ps, po)
+}
+
+// UnmarshalJSON implements JSON.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ temp, err := NewVersion(s)
+ if err != nil {
+ return err
+ }
+ v.major = temp.major
+ v.minor = temp.minor
+ v.patch = temp.patch
+ v.pre = temp.pre
+ v.metadata = temp.metadata
+ v.original = temp.original
+ return nil
+}
+
+// MarshalJSON implements JSON.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (v *Version) UnmarshalText(text []byte) error {
+ temp, err := NewVersion(string(text))
+ if err != nil {
+ return err
+ }
+
+ *v = *temp
+
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (v Version) MarshalText() ([]byte, error) {
+ return []byte(v.String()), nil
+}
+
+// Scan implements the SQL.Scanner interface.
+func (v *Version) Scan(value interface{}) error {
+ var s string
+ s, _ = value.(string)
+ temp, err := NewVersion(s)
+ if err != nil {
+ return err
+ }
+ v.major = temp.major
+ v.minor = temp.minor
+ v.patch = temp.patch
+ v.pre = temp.pre
+ v.metadata = temp.metadata
+ v.original = temp.original
+ return nil
+}
+
+// Value implements the Driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+ return v.String(), nil
+}
+
+func compareSegment(v, o uint64) int {
+ if v < o {
+ return -1
+ }
+ if v > o {
+ return 1
+ }
+
+ return 0
+}
+
+func comparePrerelease(v, o string) int {
+ // split the prerelease versions by their part. The separator, per the spec,
+ // is a .
+ sparts := strings.Split(v, ".")
+ oparts := strings.Split(o, ".")
+
+ // Find the longer length of the parts to know how many loop iterations to
+ // go through.
+ slen := len(sparts)
+ olen := len(oparts)
+
+ l := slen
+ if olen > slen {
+ l = olen
+ }
+
+ // Iterate over each part of the prereleases to compare the differences.
+ for i := 0; i < l; i++ {
+ // Since the length of the parts can be different we need to create
+ // a placeholder. This is to avoid out of bounds issues.
+ stemp := ""
+ if i < slen {
+ stemp = sparts[i]
+ }
+
+ otemp := ""
+ if i < olen {
+ otemp = oparts[i]
+ }
+
+ d := comparePrePart(stemp, otemp)
+ if d != 0 {
+ return d
+ }
+ }
+
+ // Reaching here means two versions are of equal value but have different
+ // metadata (the part following a +). They are not identical in string form
+ // but the version comparison finds them to be equal.
+ return 0
+}
+
+func comparePrePart(s, o string) int {
+ // Fastpath if they are equal
+ if s == o {
+ return 0
+ }
+
+ // When s or o are empty we can use the other in an attempt to determine
+ // the response.
+ if s == "" {
+ if o != "" {
+ return -1
+ }
+ return 1
+ }
+
+ if o == "" {
+ if s != "" {
+ return 1
+ }
+ return -1
+ }
+
+ // When comparing strings "99" is greater than "103". To handle
+ // cases like this we need to detect numbers and compare them. According
+ // to the semver spec, numbers are always positive. If there is a - at the
+ // start like -99 this is to be evaluated as an alphanum. numbers always
+ // have precedence over alphanum. Parsing as Uints because negative numbers
+ // are ignored.
+
+ oi, n1 := strconv.ParseUint(o, 10, 64)
+ si, n2 := strconv.ParseUint(s, 10, 64)
+
+ // The case where both are strings compare the strings
+ if n1 != nil && n2 != nil {
+ if s > o {
+ return 1
+ }
+ return -1
+ } else if n1 != nil {
+ // o is a string and s is a number
+ return -1
+ } else if n2 != nil {
+ // s is a string and o is a number
+ return 1
+ }
+ // Both are numbers
+ if si > oi {
+ return 1
+ }
+ return -1
+}
+
+// Like strings.ContainsAny but does an only instead of any.
+func containsOnly(s string, comp string) bool {
+ return strings.IndexFunc(s, func(r rune) bool {
+ return !strings.ContainsRune(comp, r)
+ }) == -1
+}
+
+// From the spec, "Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty.
+// Numeric identifiers MUST NOT include leading zeroes.". These segments can
+// be dot separated.
+func validatePrerelease(p string) error {
+ eparts := strings.Split(p, ".")
+ for _, p := range eparts {
+ if p == "" {
+ return ErrInvalidPrerelease
+ } else if containsOnly(p, num) {
+ if len(p) > 1 && p[0] == '0' {
+ return ErrSegmentStartsZero
+ }
+ } else if !containsOnly(p, allowed) {
+ return ErrInvalidPrerelease
+ }
+ }
+
+ return nil
+}
+
+// From the spec, "Build metadata MAY be denoted by
+// appending a plus sign and a series of dot separated identifiers immediately
+// following the patch or pre-release version. Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty."
+func validateMetadata(m string) error {
+ eparts := strings.Split(m, ".")
+ for _, p := range eparts {
+ if p == "" {
+ return ErrInvalidMetadata
+ } else if !containsOnly(p, allowed) {
+ return ErrInvalidMetadata
+ }
+ }
+ return nil
+}
+
+// validateVersion checks for common validation issues but may not catch all errors
+func validateVersion(m []string) error {
+ var err error
+ var v string
+ if m[1] != "" {
+ if len(m[1]) > 1 && m[1][0] == '0' {
+ return ErrSegmentStartsZero
+ }
+ _, err = strconv.ParseUint(m[1], 10, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing version segment: %w", err)
+ }
+ }
+
+ if m[2] != "" {
+ v = strings.TrimPrefix(m[2], ".")
+ if len(v) > 1 && v[0] == '0' {
+ return ErrSegmentStartsZero
+ }
+ _, err = strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing version segment: %w", err)
+ }
+ }
+
+ if m[3] != "" {
+ v = strings.TrimPrefix(m[3], ".")
+ if len(v) > 1 && v[0] == '0' {
+ return ErrSegmentStartsZero
+ }
+ _, err = strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing version segment: %w", err)
+ }
+ }
+
+ if m[5] != "" {
+ if err = validatePrerelease(m[5]); err != nil {
+ return err
+ }
+ }
+
+ if m[8] != "" {
+ if err = validateMetadata(m[8]); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/.gitignore b/vendor/github.com/fxamacker/cbor/v2/.gitignore
new file mode 100644
index 0000000000..f1c181ec9c
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/.gitignore
@@ -0,0 +1,12 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
diff --git a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml
new file mode 100644
index 0000000000..38cb9ae101
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml
@@ -0,0 +1,104 @@
+# Do not delete linter settings. Linters like gocritic can be enabled on the command line.
+
+linters-settings:
+ depguard:
+ rules:
+ prevent_unmaintained_packages:
+ list-mode: strict
+ files:
+ - $all
+ - "!$test"
+ allow:
+ - $gostd
+ - github.com/x448/float16
+ deny:
+ - pkg: io/ioutil
+ desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil"
+ dupl:
+ threshold: 100
+ funlen:
+ lines: 100
+ statements: 50
+ goconst:
+ ignore-tests: true
+ min-len: 2
+ min-occurrences: 3
+ gocritic:
+ enabled-tags:
+ - diagnostic
+ - experimental
+ - opinionated
+ - performance
+ - style
+ disabled-checks:
+ - commentedOutCode
+ - dupImport # https://github.com/go-critic/go-critic/issues/845
+ - ifElseChain
+ - octalLiteral
+ - paramTypeCombine
+ - whyNoLint
+ gofmt:
+ simplify: false
+ goimports:
+ local-prefixes: github.com/fxamacker/cbor
+ golint:
+ min-confidence: 0
+ govet:
+ check-shadowing: true
+ lll:
+ line-length: 140
+ maligned:
+ suggest-new: true
+ misspell:
+ locale: US
+ staticcheck:
+ checks: ["all"]
+
+linters:
+ disable-all: true
+ enable:
+ - asciicheck
+ - bidichk
+ - depguard
+ - errcheck
+ - exportloopref
+ - goconst
+ - gocritic
+ - gocyclo
+ - gofmt
+ - goimports
+ - goprintffuncname
+ - gosec
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - nilerr
+ - revive
+ - staticcheck
+ - stylecheck
+ - typecheck
+ - unconvert
+ - unused
+
+issues:
+ # max-issues-per-linter default is 50. Set to 0 to disable limit.
+ max-issues-per-linter: 0
+ # max-same-issues default is 3. Set to 0 to disable limit.
+ max-same-issues: 0
+
+ exclude-rules:
+ - path: decode.go
+ text: "string ` overflows ` has (\\d+) occurrences, make it a constant"
+ - path: decode.go
+ text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant"
+ - path: decode.go
+ text: "string `, ` has (\\d+) occurrences, make it a constant"
+ - path: decode.go
+ text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant"
+ - path: decode.go
+ text: "string `\\]\\)` has (\\d+) occurrences, make it a constant"
+ - path: valid.go
+ text: "string ` for type ` has (\\d+) occurrences, make it a constant"
+ - path: valid.go
+ text: "string `cbor: ` has (\\d+) occurrences, make it a constant"
diff --git a/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..c794b2b0c6
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md
@@ -0,0 +1,133 @@
+
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual
+identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the overall
+ community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or advances of
+ any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email address,
+ without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+faye.github@gmail.com.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series of
+actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or permanent
+ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within the
+community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.1, available at
+[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
+
+Community Impact Guidelines were inspired by
+[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
+
+For answers to common questions about this code of conduct, see the FAQ at
+[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
+[https://www.contributor-covenant.org/translations][translations].
+
+[homepage]: https://www.contributor-covenant.org
+[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
+[Mozilla CoC]: https://github.com/mozilla/diversity
+[FAQ]: https://www.contributor-covenant.org/faq
+[translations]: https://www.contributor-covenant.org/translations
diff --git a/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md
new file mode 100644
index 0000000000..de0965e12d
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md
@@ -0,0 +1,41 @@
+# How to contribute
+
+You can contribute by using the library, opening issues, or opening pull requests.
+
+## Bug reports and security vulnerabilities
+
+Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues).
+
+To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy).
+
+Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me.
+
+## Pull requests
+
+Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc.
+
+Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts.
+
+See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details.
+
+Pull requests have a greater chance of being approved if:
+- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature.
+- it has > 97% code coverage.
+
+## Describe your issue
+
+Clearly describe the issue:
+* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error.
+* If you propose a change or addition, try to give an example how the improved code could look like or how to use it.
+* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message.
+
+## Please don't
+
+Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me.
+
+Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me.
+
+## Credits
+
+- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22.
+- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements.
diff --git a/vendor/github.com/fxamacker/cbor/v2/LICENSE b/vendor/github.com/fxamacker/cbor/v2/LICENSE
new file mode 100644
index 0000000000..eaa8504921
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019-present Faye Amacker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md
new file mode 100644
index 0000000000..d072b81c73
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/README.md
@@ -0,0 +1,934 @@
+
CBOR Codec 
+
+[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
+
+CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc. CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades.
+
+`fxamacker/cbor` is used in projects by Arm Ltd., EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes[*](https://github.com/search?q=org%3Akubernetes%20fxamacker%2Fcbor&type=code), Let's Encrypt, Linux Foundation, Microsoft, Oasis Protocol, Red Hat[*](https://github.com/search?q=org%3Aopenshift+fxamacker%2Fcbor&type=code), Tailscale[*](https://github.com/search?q=org%3Atailscale+fxamacker%2Fcbor&type=code), Veraison[*](https://github.com/search?q=org%3Averaison+fxamacker%2Fcbor&type=code), [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
+
+See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accept a user-specified buffer.
+
+## fxamacker/cbor
+
+[](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
+[](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22)
+[](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
+[](#fuzzing-and-code-coverage)
+[](https://goreportcard.com/report/github.com/fxamacker/cbor)
+[](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage)
+
+`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
+
+Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
+
+API is mostly same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options.
+
+Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
+
+ 🔎 Highlights
+
+__🚀 Speed__
+
+Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data.
+
+__🔒 Security__
+
+Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
+
+Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation.
+
+__🗜️ Data Size__
+
+Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) and field tag "-" automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
+
+__:jigsaw: Usability__
+
+API is mostly same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines.
+
+Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc.
+
+__📆 Extensibility__
+
+Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library.
+
+
+
+
+
+### Secure Decoding with Configurable Settings
+
+`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.
+
+Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data.
+
+> [!NOTE]
+> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`:
+>
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op |
+>
+> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference.
+>
+> 🔎 Benchmark details
+>
+> Latest comparison for decoding CBOR data to Go `[]byte`:
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores)
+> - go test -bench=. -benchmem -count=20
+>
+> #### Prior comparisons
+>
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
+> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
+> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
+>
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.19.6, linux/amd64, i5-13600K (DDR4)
+> - go test -bench=. -benchmem -count=20
+>
+>
+
+In contrast, some codecs can crash or use excessive resources while decoding bad data.
+
+> [!WARNING]
+> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
+>
+> 🔎 gob fatal error (out of memory) 💥 decoding 181 bytes
+>
+> ```Go
+> // Example of encoding/gob having "fatal error: runtime: out of memory"
+> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024).
+> package main
+> import (
+> "bytes"
+> "encoding/gob"
+> "encoding/hex"
+> "fmt"
+> )
+>
+> // Example data is from https://github.com/golang/go/issues/24446
+> // (shortened to 181 bytes).
+> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
+> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
+> "860001013001ff860001013001ffb80000001eff850401010e3030303030" +
+> "30303030303030303001ff3000010c0104000016ffb70201010830303030" +
+> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
+> "303030303030303030303030303030303030303030303030303030303030" +
+> "30"
+>
+> type X struct {
+> J *X
+> K map[string]int
+> }
+>
+> func main() {
+> raw, _ := hex.DecodeString(data)
+> decoder := gob.NewDecoder(bytes.NewReader(raw))
+>
+> var x X
+> decoder.Decode(&x) // fatal error: runtime: out of memory
+> fmt.Println("Decoding finished.")
+> }
+> ```
+>
+>
+>
+
+### Smaller Encodings with Struct Tag Options
+
+Struct tags automatically reduce encoded size of structs and improve speed.
+
+We can write less code by using struct tag options:
+- `toarray`: encode without field names (decode back to original struct)
+- `keyasint`: encode field names as integers (decode back to original struct)
+- `omitempty`: omit empty field when encoding
+- `omitzero`: omit zero-value field when encoding
+
+As a special case, struct field tag "-" omits the field.
+
+NOTE: When a struct uses `toarray`, the encoder will ignore `omitempty` and `omitzero` to prevent position of encoded array elements from changing. This allows decoder to match encoded elements to their Go struct field.
+
+
+
+> [!NOTE]
+> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte!
+> - `encoding/json`: 18 bytes of JSON
+> - `fxamacker/cbor`: 1 byte of CBOR
+>
+> 🔎 Encoding 3-level nested Go struct with omitempty
+>
+> https://go.dev/play/p/YxwvfPdFQG2
+>
+> ```Go
+> // Example encoding nested struct (with omitempty tag)
+> // - encoding/json: 18 byte JSON
+> // - fxamacker/cbor: 1 byte CBOR
+>
+> package main
+>
+> import (
+> "encoding/hex"
+> "encoding/json"
+> "fmt"
+>
+> "github.com/fxamacker/cbor/v2"
+> )
+>
+> type GrandChild struct {
+> Quux int `json:",omitempty"`
+> }
+>
+> type Child struct {
+> Baz int `json:",omitempty"`
+> Qux GrandChild `json:",omitempty"`
+> }
+>
+> type Parent struct {
+> Foo Child `json:",omitempty"`
+> Bar int `json:",omitempty"`
+> }
+>
+> func cb() {
+> results, _ := cbor.Marshal(Parent{})
+> fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
+>
+> text, _ := cbor.Diagnose(results) // Diagnostic Notation
+> fmt.Println("DN: " + text)
+> }
+>
+> func js() {
+> results, _ := json.Marshal(Parent{})
+> fmt.Println("hex(JSON): " + hex.EncodeToString(results))
+>
+> text := string(results) // JSON
+> fmt.Println("JSON: " + text)
+> }
+>
+> func main() {
+> cb()
+> fmt.Println("-------------")
+> js()
+> }
+> ```
+>
+> Output (DN is Diagnostic Notation):
+> ```
+> hex(CBOR): a0
+> DN: {}
+> -------------
+> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
+> JSON: {"Foo":{"Qux":{}}}
+> ```
+>
+>
+
+
+## Quick Start
+
+__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.
+
+> [!TIP]
+>
+> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta).
+>
+> 🔎 More about tinygo feature branch
+>
+> ### Tinygo
+>
+> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go).
+>
+> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo.
+>
+> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet.
+>
+> Changes in this feature branch only affect tinygo compiled software. Summary of changes:
+> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). User can specify higher limit but 24+ crashes tests when compiled with tinygo v0.33.
+> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing needed feature.
+> - encoding error message can be different when encoding function type.
+>
+> Related tinygo issues:
+> - https://github.com/tinygo-org/tinygo/issues/4277
+> - https://github.com/tinygo-org/tinygo/issues/4458
+>
+>
+
+
+### Key Points
+
+This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).
+
+- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items.
+- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items.
+
+Configurable limits and options can be used to balance trade-offs.
+
+- Encoding and decoding modes are created from options (settings).
+- Modes can be created at startup and reused.
+- Modes are safe for concurrent use.
+
+### Default Mode
+
+Package level functions only use this library's default settings.
+They provide the "default mode" of encoding and decoding.
+
+```go
+// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc.
+b, err = cbor.Marshal(v) // encode v to []byte b
+err = cbor.Unmarshal(b, &v) // decode []byte b to v
+decoder = cbor.NewDecoder(r) // create decoder with io.Reader r
+err = decoder.Decode(&v) // decode a CBOR data item to v
+
+// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface.
+err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool.
+
+// v2.5.0 added new functions that return remaining bytes.
+
+// UnmarshalFirst decodes first CBOR data item and returns remaining bytes.
+rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
+
+// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
+text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text
+
+// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but
+// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes.
+```
+
+> [!IMPORTANT]
+> CBOR settings allow trade-offs between speed, security, encoding size, etc.
+>
+> - Different CBOR libraries may use different default settings.
+> - CBOR-based formats or protocols usually require specific settings.
+>
+> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
+
+### Presets
+
+Presets can be used as-is or as a starting point for custom settings.
+
+```go
+// EncOptions is a struct of encoder settings.
+func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding
+func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization
+func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR
+func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR
+```
+
+Presets are used to create custom modes.
+
+### Custom Modes
+
+Modes are created from settings. Once created, modes have immutable settings.
+
+💡 Create the mode at startup and reuse it. It is safe for concurrent use.
+
+```Go
+// Create encoding mode.
+opts := cbor.CoreDetEncOptions() // use preset options as a starting point
+opts.Time = cbor.TimeUnix // change any settings if needed
+em, err := opts.EncMode() // create an immutable encoding mode
+
+// Reuse the encoding mode. It is safe for concurrent use.
+
+// API matches encoding/json.
+b, err := em.Marshal(v) // encode v to []byte b
+encoder := em.NewEncoder(w) // create encoder with io.Writer w
+err := encoder.Encode(v) // encode v to io.Writer w
+```
+
+Default mode and custom modes automatically apply struct tags.
+
+### User Specified Buffer for Encoding (v2.7.0)
+
+`UserBufferEncMode` interface extends `EncMode` interface to add `MarshalToBuffer()`. It accepts a user-specified buffer instead of using built-in buffer pool.
+
+```Go
+em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode
+
+var buf bytes.Buffer
+err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
+```
+
+### Struct Tags
+
+Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs.
+
+As a special case, struct field tag "-" omits the field.
+
+ 🔎 Example encoding with struct field tag "-"
+
+https://go.dev/play/p/aWEIFxd7InX
+
+```Go
+// https://github.com/fxamacker/cbor/issues/652
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+// The `cbor:"-"` tag omits the Type field when encoding to CBOR.
+type Entity struct {
+ _ struct{} `cbor:",toarray"`
+ ID uint64 `json:"id"`
+ Type string `cbor:"-" json:"typeOf"`
+ Name string `json:"name"`
+}
+
+func main() {
+ entity := Entity{
+ ID: 1,
+ Type: "int64",
+ Name: "Identifier",
+ }
+
+ c, _ := cbor.Marshal(entity)
+ diag, _ := cbor.Diagnose(c)
+ fmt.Printf("CBOR in hex: %x\n", c)
+ fmt.Printf("CBOR in edn: %s\n", diag)
+
+ j, _ := json.Marshal(entity)
+ fmt.Printf("JSON: %s\n", string(j))
+
+ fmt.Printf("JSON encoding is %d bytes\n", len(j))
+ fmt.Printf("CBOR encoding is %d bytes\n", len(c))
+
+ // Output:
+ // CBOR in hex: 82016a4964656e746966696572
+ // CBOR in edn: [1, "Identifier"]
+ // JSON: {"id":1,"typeOf":"int64","name":"Identifier"}
+ // JSON encoding is 45 bytes
+ // CBOR encoding is 13 bytes
+}
+```
+
+
+
+ 🔎 Example encoding 3-level nested Go struct to 1 byte CBOR
+
+https://go.dev/play/p/YxwvfPdFQG2
+
+```Go
+// Example encoding nested struct (with omitempty tag)
+// - encoding/json: 18 byte JSON
+// - fxamacker/cbor: 1 byte CBOR
+package main
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+type GrandChild struct {
+ Quux int `json:",omitempty"`
+}
+
+type Child struct {
+ Baz int `json:",omitempty"`
+ Qux GrandChild `json:",omitempty"`
+}
+
+type Parent struct {
+ Foo Child `json:",omitempty"`
+ Bar int `json:",omitempty"`
+}
+
+func cb() {
+ results, _ := cbor.Marshal(Parent{})
+ fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
+
+ text, _ := cbor.Diagnose(results) // Diagnostic Notation
+ fmt.Println("DN: " + text)
+}
+
+func js() {
+ results, _ := json.Marshal(Parent{})
+ fmt.Println("hex(JSON): " + hex.EncodeToString(results))
+
+ text := string(results) // JSON
+ fmt.Println("JSON: " + text)
+}
+
+func main() {
+ cb()
+ fmt.Println("-------------")
+ js()
+}
+```
+
+Output (DN is Diagnostic Notation):
+```
+hex(CBOR): a0
+DN: {}
+-------------
+hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
+JSON: {"Foo":{"Qux":{}}}
+```
+
+
+
+
+
+ 🔎 Example using struct tag options
+
+
+
+
+
+Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
+
+### CBOR Tags
+
+CBOR tags are specified in a `TagSet`.
+
+Custom modes can be created with a `TagSet` to handle CBOR tags.
+
+```go
+em, err := opts.EncMode() // no CBOR tags
+em, err := opts.EncModeWithTags(ts) // immutable CBOR tags
+em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
+```
+
+`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.
+
+ 🔎 Example using TagSet and TagOptions
+
+```go
+// Use signedCWT struct defined in "Decoding CWT" example.
+
+// Create TagSet (safe for concurrency).
+tags := cbor.NewTagSet()
+// Register tag COSE_Sign1 18 with signedCWT type.
+tags.Add(
+ cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
+ reflect.TypeOf(signedCWT{}),
+ 18)
+
+// Create DecMode with immutable tags.
+dm, _ := cbor.DecOptions{}.DecModeWithTags(tags)
+
+// Unmarshal to signedCWT with tag support.
+var v signedCWT
+if err := dm.Unmarshal(data, &v); err != nil {
+ return err
+}
+
+// Create EncMode with immutable tags.
+em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
+
+// Marshal signedCWT with tag number.
+if data, err := em.Marshal(v); err != nil {
+ return err
+}
+```
+
+
+
+👉 `fxamacker/cbor` allows user apps to use almost any current or future CBOR tag number by implementing `cbor.Marshaler` and `cbor.Unmarshaler` interfaces.
+
+Basically, `MarshalCBOR` and `UnmarshalCBOR` functions can be implemented by user apps and those functions will automatically be called by this CBOR codec's `Marshal`, `Unmarshal`, etc.
+
+The following [example](https://github.com/fxamacker/cbor/blob/master/example_embedded_json_tag_for_cbor_test.go) shows how to encode and decode a tagged CBOR data item with tag number 262. The tag content is a JSON object "embedded" as a CBOR byte string (major type 2).
+
+ 🔎 Example using Embedded JSON Tag for CBOR (tag 262)
+
+```go
+// https://github.com/fxamacker/cbor/issues/657
+
+package cbor_test
+
+// NOTE: RFC 8949 does not mention tag number 262. IANA assigned
+// CBOR tag number 262 as "Embedded JSON Object" specified by the
+// document Embedded JSON Tag for CBOR:
+//
+// "Tag 262 can be applied to a byte string (major type 2) to indicate
+// that the byte string is a JSON Object. The length of the byte string
+// indicates the content."
+//
+// For more info, see Embedded JSON Tag for CBOR at:
+// https://github.com/toravir/CBOR-Tag-Specs/blob/master/embeddedJSON.md
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+// cborTagNumForEmbeddedJSON is the CBOR tag number 262.
+const cborTagNumForEmbeddedJSON = 262
+
+// EmbeddedJSON represents a Go value to be encoded as a tagged CBOR data item
+// with tag number 262 and the tag content is a JSON object "embedded" as a
+// CBOR byte string (major type 2).
+type EmbeddedJSON struct {
+ any
+}
+
+func NewEmbeddedJSON(val any) EmbeddedJSON {
+ return EmbeddedJSON{val}
+}
+
+// MarshalCBOR encodes EmbeddedJSON to a tagged CBOR data item with the
+// tag number 262 and the tag content is a JSON object that is
+// "embedded" as a CBOR byte string.
+func (v EmbeddedJSON) MarshalCBOR() ([]byte, error) {
+ // Encode v to JSON object.
+ data, err := json.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create cbor.Tag representing a tagged CBOR data item.
+ tag := cbor.Tag{
+ Number: cborTagNumForEmbeddedJSON,
+ Content: data,
+ }
+
+ // Marshal to a tagged CBOR data item.
+ return cbor.Marshal(tag)
+}
+
+// UnmarshalCBOR decodes a tagged CBOR data item to EmbeddedJSON.
+// The byte slice provided to this function must contain a single
+// tagged CBOR data item with the tag number 262 and tag content
+// must be a JSON object "embedded" as a CBOR byte string.
+func (v *EmbeddedJSON) UnmarshalCBOR(b []byte) error {
+ // Unmarshal tagged CBOR data item.
+ var tag cbor.Tag
+ if err := cbor.Unmarshal(b, &tag); err != nil {
+ return err
+ }
+
+ // Check tag number.
+ if tag.Number != cborTagNumForEmbeddedJSON {
+ return fmt.Errorf("got tag number %d, expect tag number %d", tag.Number, cborTagNumForEmbeddedJSON)
+ }
+
+ // Check tag content.
+ jsonData, isByteString := tag.Content.([]byte)
+ if !isByteString {
+ return fmt.Errorf("got tag content type %T, expect tag content []byte", tag.Content)
+ }
+
+ // Unmarshal JSON object.
+ return json.Unmarshal(jsonData, v)
+}
+
+// MarshalJSON encodes EmbeddedJSON to a JSON object.
+func (v EmbeddedJSON) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.any)
+}
+
+// UnmarshalJSON decodes a JSON object.
+func (v *EmbeddedJSON) UnmarshalJSON(b []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(b))
+ dec.UseNumber()
+ return dec.Decode(&v.any)
+}
+
+func Example_embeddedJSONTagForCBOR() {
+ value := NewEmbeddedJSON(map[string]any{
+ "name": "gopher",
+ "id": json.Number("42"),
+ })
+
+ data, err := cbor.Marshal(value)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("cbor: %x\n", data)
+
+ var v EmbeddedJSON
+ err = cbor.Unmarshal(data, &v)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("%+v\n", v.any)
+ for k, v := range v.any.(map[string]any) {
+ fmt.Printf(" %s: %v (%T)\n", k, v, v)
+ }
+}
+```
+
+
+
+
+### Functions and Interfaces
+
+ 🔎 Functions and interfaces at a glance
+
+Common functions with same API as `encoding/json`:
+- `Marshal`, `Unmarshal`
+- `NewEncoder`, `(*Encoder).Encode`
+- `NewDecoder`, `(*Decoder).Decode`
+
+NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes
+because RFC 8949 treats CBOR data item with remaining bytes as malformed.
+- 💡 Use `UnmarshalFirst` to decode first CBOR data item and return any remaining bytes.
+
+Other useful functions:
+- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
+- `UnmarshalFirst` decodes first CBOR data item and return any remaining bytes.
+- `Wellformed` returns true if the CBOR data item is well-formed.
+
+Interfaces identical or comparable to Go `encoding` packages include:
+`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
+
+The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding.
+
+
+
+### Security Tips
+
+🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data.
+
+Default limits may need to be increased for systems handling very large data (e.g. blockchains).
+
+`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`.
+
+## Status
+
+[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs.
+- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string.
+- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function.
+- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR.
+
+v2.9.0 passed fuzz tests and is production quality.
+
+The minimum version of Go required to build:
+- v2.8.0 and newer releases require go 1.20+.
+- v2.7.1 and older releases require go 1.17+.
+
+For more details, see [release notes](https://github.com/fxamacker/cbor/releases).
+
+### Prior Releases
+
+[v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality.
+
+[v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
+
+[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings.
+
+[v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
+
+__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading.
+
+See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes.
+
+See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
+
+
+
+## Who uses fxamacker/cbor
+
+`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Confidential Computing Consortium, ConsenSys, EdgeX Foundry, F5, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes, Let's Encrypt (ISRG), Linaro, Linux Foundation, Matrix.org, Microsoft, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Red Hat OpenShift, Smallstep, Tailscale, Taurus SA, TIBCO, Veraison, and others.
+
+`fxamacker/cbor` passed multiple confidential security assessments in 2022. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) assessed a subset of fxamacker/cbor v2.4.
+
+## Standards
+
+`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
+
+Notable CBOR features include:
+
+| CBOR Feature | Description |
+| :--- | :--- |
+| CBOR tags | API supports built-in and user-defined tags. |
+| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. |
+| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). |
+| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. |
+| Indefinite length data | Option to allow/forbid for encoding and decoding. |
+| Well-formedness | Always checked and enforced. |
+| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. |
+| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). |
+
+Known limitations are noted in the [Limitations section](#limitations).
+
+Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps.
+
+Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data.
+
+After well-formedness is verified, basic validity errors are handled as follows:
+
+* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default.
+* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys.
+
+When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future.
+
+By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined.
+
+__Click to expand topic:__
+
+
+ 🔎 Duplicate Map Keys
+
+This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
+
+`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type.
+
+`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number.
+
+APF suffix means "Allow Partial Fill" so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol.
+
+
+
+
+ 🔎 Tag Validity
+
+This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799):
+
+* Inadmissible type for tag content
+* Inadmissible value for tag content
+
+Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways:
+
+* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type.
+* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified.
+
+Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR.
+
+For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options).
+
+
+
+## Limitations
+
+If any of these limitations prevent you from using this library, please open an issue along with a link to your project.
+
+* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`.
+* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items.
+* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation.
+
+## Fuzzing and Code Coverage
+
+__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release.
+
+__Coverage-guided fuzzing__ must pass billions of execs before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project.
+
+
+
+## Versions and API Changes
+This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes.
+
+These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases:
+`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`.
+
+Exclusions from SemVer:
+- Newly added API documented as "subject to change".
+- Newly added API in the master branch that has never been tagged in non-beta release.
+- If function parameters are unchanged, bug fixes that change behavior (e.g. return error for edge case was missed in prior version). We try to highlight these in the release notes and add extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
+
+This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.) Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions.
+
+## Code of Conduct
+
+This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments.
+
+## Contributing
+
+Please open an issue before beginning work on a PR. The improvement may have already been considered, etc.
+
+For more info, see [How to Contribute](CONTRIBUTING.md).
+
+## Security Policy
+
+Security fixes are provided for the latest released version of fxamacker/cbor.
+
+For the full text of the Security Policy, see [SECURITY.md](SECURITY.md).
+
+## Acknowledgements
+
+Many thanks to all the contributors on this project!
+
+I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more.
+
+I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days.
+
+Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0.
+
+This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs.
+
+Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis).
+
+Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included!
+
+This library uses `x448/float16` which used to be included. As a standalone package, `x448/float16` is useful to other projects as well.
+
+## License
+
+Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker).
+
+fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text.
+
+
diff --git a/vendor/github.com/fxamacker/cbor/v2/SECURITY.md b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md
new file mode 100644
index 0000000000..9c05146d16
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md
@@ -0,0 +1,7 @@
+# Security Policy
+
+Security fixes are provided for the latest released version of fxamacker/cbor.
+
+If the security vulnerability is already known to the public, then you can open an issue as a bug report.
+
+To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public.
diff --git a/vendor/github.com/fxamacker/cbor/v2/bytestring.go b/vendor/github.com/fxamacker/cbor/v2/bytestring.go
new file mode 100644
index 0000000000..23c5724d2e
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/bytestring.go
@@ -0,0 +1,90 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "errors"
+)
+
+// ByteString represents CBOR byte string (major type 2). ByteString can be used
+// when using a Go []byte is not possible or convenient. For example, Go doesn't
+// allow []byte as map key, so ByteString can be used to support data formats
+// having CBOR map with byte string keys. ByteString can also be used to
+// encode invalid UTF-8 string as CBOR byte string.
+// See DecOption.MapKeyByteStringMode for more details.
+type ByteString string
+
+// Bytes returns bytes representing ByteString.
+func (bs ByteString) Bytes() []byte {
+ return []byte(bs)
+}
+
+// MarshalCBOR encodes ByteString as CBOR byte string (major type 2).
+func (bs ByteString) MarshalCBOR() ([]byte, error) {
+ e := getEncodeBuffer()
+ defer putEncodeBuffer(e)
+
+ // Encode length
+ encodeHead(e, byte(cborTypeByteString), uint64(len(bs)))
+
+ // Encode data
+ buf := make([]byte, e.Len()+len(bs))
+ n := copy(buf, e.Bytes())
+ copy(buf[n:], bs)
+
+ return buf, nil
+}
+
+// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
+// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
+//
+// Deprecated: No longer used by this codec; kept for compatibility
+// with user apps that directly call this function.
+func (bs *ByteString) UnmarshalCBOR(data []byte) error {
+ if bs == nil {
+ return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ // Check well-formedness of CBOR data item.
+ // ByteString.UnmarshalCBOR() is exported, so
+ // the codec needs to support same behavior for:
+ // - Unmarshal(data, *ByteString)
+ // - ByteString.UnmarshalCBOR(data)
+ err := d.wellformed(false, false)
+ if err != nil {
+ return err
+ }
+
+ return bs.unmarshalCBOR(data)
+}
+
+// unmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
+// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
+// This function assumes data is well-formed, and does not perform bounds checking.
+// This function is called by Unmarshal().
+func (bs *ByteString) unmarshalCBOR(data []byte) error {
+ if bs == nil {
+ return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
+ }
+
+ // Decoding CBOR null and CBOR undefined to ByteString resets data.
+ // This behavior is similar to decoding CBOR null and CBOR undefined to []byte.
+ if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
+ *bs = ""
+ return nil
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ // Check if CBOR data type is byte string
+ if typ := d.nextCBORType(); typ != cborTypeByteString {
+ return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()}
+ }
+
+ b, _ := d.parseByteString()
+ *bs = ByteString(b)
+ return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/cache.go b/vendor/github.com/fxamacker/cbor/v2/cache.go
new file mode 100644
index 0000000000..5051f110fb
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/cache.go
@@ -0,0 +1,370 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+type encodeFuncs struct {
+ ef encodeFunc
+ ief isEmptyFunc
+ izf isZeroFunc
+}
+
+var (
+ decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType
+ encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType
+ encodeFuncCache sync.Map // map[reflect.Type]encodeFuncs
+ typeInfoCache sync.Map // map[reflect.Type]*typeInfo
+)
+
+type specialType int
+
+const (
+ specialTypeNone specialType = iota
+ specialTypeUnmarshalerIface
+ specialTypeUnexportedUnmarshalerIface
+ specialTypeEmptyIface
+ specialTypeIface
+ specialTypeTag
+ specialTypeTime
+ specialTypeJSONUnmarshalerIface
+)
+
+type typeInfo struct {
+ elemTypeInfo *typeInfo
+ keyTypeInfo *typeInfo
+ typ reflect.Type
+ kind reflect.Kind
+ nonPtrType reflect.Type
+ nonPtrKind reflect.Kind
+ spclType specialType
+}
+
+func newTypeInfo(t reflect.Type) *typeInfo {
+ tInfo := typeInfo{typ: t, kind: t.Kind()}
+
+ for t.Kind() == reflect.Pointer {
+ t = t.Elem()
+ }
+
+ k := t.Kind()
+
+ tInfo.nonPtrType = t
+ tInfo.nonPtrKind = k
+
+ if k == reflect.Interface {
+ if t.NumMethod() == 0 {
+ tInfo.spclType = specialTypeEmptyIface
+ } else {
+ tInfo.spclType = specialTypeIface
+ }
+ } else if t == typeTag {
+ tInfo.spclType = specialTypeTag
+ } else if t == typeTime {
+ tInfo.spclType = specialTypeTime
+ } else if reflect.PointerTo(t).Implements(typeUnexportedUnmarshaler) {
+ tInfo.spclType = specialTypeUnexportedUnmarshalerIface
+ } else if reflect.PointerTo(t).Implements(typeUnmarshaler) {
+ tInfo.spclType = specialTypeUnmarshalerIface
+ } else if reflect.PointerTo(t).Implements(typeJSONUnmarshaler) {
+ tInfo.spclType = specialTypeJSONUnmarshalerIface
+ }
+
+ switch k {
+ case reflect.Array, reflect.Slice:
+ tInfo.elemTypeInfo = getTypeInfo(t.Elem())
+ case reflect.Map:
+ tInfo.keyTypeInfo = getTypeInfo(t.Key())
+ tInfo.elemTypeInfo = getTypeInfo(t.Elem())
+ }
+
+ return &tInfo
+}
+
+type decodingStructType struct {
+ fields fields
+ fieldIndicesByName map[string]int
+ err error
+ toArray bool
+}
+
+// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead,
+// here's a very basic implementation of an aggregated error.
+type multierror []error
+
+func (m multierror) Error() string {
+ var sb strings.Builder
+ for i, err := range m {
+ sb.WriteString(err.Error())
+ if i < len(m)-1 {
+ sb.WriteString(", ")
+ }
+ }
+ return sb.String()
+}
+
+func getDecodingStructType(t reflect.Type) *decodingStructType {
+ if v, _ := decodingStructTypeCache.Load(t); v != nil {
+ return v.(*decodingStructType)
+ }
+
+ flds, structOptions := getFields(t)
+
+ toArray := hasToArrayOption(structOptions)
+
+ var errs []error
+ for i := 0; i < len(flds); i++ {
+ if flds[i].keyAsInt {
+ nameAsInt, numErr := strconv.Atoi(flds[i].name)
+ if numErr != nil {
+ errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")"))
+ break
+ }
+ flds[i].nameAsInt = int64(nameAsInt)
+ }
+
+ flds[i].typInfo = getTypeInfo(flds[i].typ)
+ }
+
+ fieldIndicesByName := make(map[string]int, len(flds))
+ for i, fld := range flds {
+ if _, ok := fieldIndicesByName[fld.name]; ok {
+ errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name))
+ continue
+ }
+ fieldIndicesByName[fld.name] = i
+ }
+
+ var err error
+ {
+ var multi multierror
+ for _, each := range errs {
+ if each != nil {
+ multi = append(multi, each)
+ }
+ }
+ if len(multi) == 1 {
+ err = multi[0]
+ } else if len(multi) > 1 {
+ err = multi
+ }
+ }
+
+ structType := &decodingStructType{
+ fields: flds,
+ fieldIndicesByName: fieldIndicesByName,
+ err: err,
+ toArray: toArray,
+ }
+ decodingStructTypeCache.Store(t, structType)
+ return structType
+}
+
+type encodingStructType struct {
+ fields fields
+ bytewiseFields fields
+ lengthFirstFields fields
+ omitEmptyFieldsIdx []int
+ err error
+ toArray bool
+}
+
+func (st *encodingStructType) getFields(em *encMode) fields {
+ switch em.sort {
+ case SortNone, SortFastShuffle:
+ return st.fields
+ case SortLengthFirst:
+ return st.lengthFirstFields
+ default:
+ return st.bytewiseFields
+ }
+}
+
+type bytewiseFieldSorter struct {
+ fields fields
+}
+
+func (x *bytewiseFieldSorter) Len() int {
+ return len(x.fields)
+}
+
+func (x *bytewiseFieldSorter) Swap(i, j int) {
+ x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
+}
+
+func (x *bytewiseFieldSorter) Less(i, j int) bool {
+ return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
+}
+
+type lengthFirstFieldSorter struct {
+ fields fields
+}
+
+func (x *lengthFirstFieldSorter) Len() int {
+ return len(x.fields)
+}
+
+func (x *lengthFirstFieldSorter) Swap(i, j int) {
+ x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
+}
+
+func (x *lengthFirstFieldSorter) Less(i, j int) bool {
+ if len(x.fields[i].cborName) != len(x.fields[j].cborName) {
+ return len(x.fields[i].cborName) < len(x.fields[j].cborName)
+ }
+ return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
+}
+
+func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
+ if v, _ := encodingStructTypeCache.Load(t); v != nil {
+ structType := v.(*encodingStructType)
+ return structType, structType.err
+ }
+
+ flds, structOptions := getFields(t)
+
+ if hasToArrayOption(structOptions) {
+ return getEncodingStructToArrayType(t, flds)
+ }
+
+ var err error
+ var hasKeyAsInt bool
+ var hasKeyAsStr bool
+ var omitEmptyIdx []int
+ e := getEncodeBuffer()
+ for i := 0; i < len(flds); i++ {
+ // Get field's encodeFunc
+ flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ)
+ if flds[i].ef == nil {
+ err = &UnsupportedTypeError{t}
+ break
+ }
+
+ // Encode field name
+ if flds[i].keyAsInt {
+ nameAsInt, numErr := strconv.Atoi(flds[i].name)
+ if numErr != nil {
+ err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")")
+ break
+ }
+ flds[i].nameAsInt = int64(nameAsInt)
+ if nameAsInt >= 0 {
+ encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt))
+ } else {
+ n := nameAsInt*(-1) - 1
+ encodeHead(e, byte(cborTypeNegativeInt), uint64(n))
+ }
+ flds[i].cborName = make([]byte, e.Len())
+ copy(flds[i].cborName, e.Bytes())
+ e.Reset()
+
+ hasKeyAsInt = true
+ } else {
+ encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name)))
+ flds[i].cborName = make([]byte, e.Len()+len(flds[i].name))
+ n := copy(flds[i].cborName, e.Bytes())
+ copy(flds[i].cborName[n:], flds[i].name)
+ e.Reset()
+
+ // If cborName contains a text string, then cborNameByteString contains a
+ // string that has the byte string major type but is otherwise identical to
+ // cborName.
+ flds[i].cborNameByteString = make([]byte, len(flds[i].cborName))
+ copy(flds[i].cborNameByteString, flds[i].cborName)
+ // Reset encoded CBOR type to byte string, preserving the "additional
+ // information" bits:
+ flds[i].cborNameByteString[0] = byte(cborTypeByteString) |
+ getAdditionalInformation(flds[i].cborNameByteString[0])
+
+ hasKeyAsStr = true
+ }
+
+ // Check if field can be omitted when empty
+ if flds[i].omitEmpty {
+ omitEmptyIdx = append(omitEmptyIdx, i)
+ }
+ }
+ putEncodeBuffer(e)
+
+ if err != nil {
+ structType := &encodingStructType{err: err}
+ encodingStructTypeCache.Store(t, structType)
+ return structType, structType.err
+ }
+
+ // Sort fields by canonical order
+ bytewiseFields := make(fields, len(flds))
+ copy(bytewiseFields, flds)
+ sort.Sort(&bytewiseFieldSorter{bytewiseFields})
+
+ lengthFirstFields := bytewiseFields
+ if hasKeyAsInt && hasKeyAsStr {
+ lengthFirstFields = make(fields, len(flds))
+ copy(lengthFirstFields, flds)
+ sort.Sort(&lengthFirstFieldSorter{lengthFirstFields})
+ }
+
+ structType := &encodingStructType{
+ fields: flds,
+ bytewiseFields: bytewiseFields,
+ lengthFirstFields: lengthFirstFields,
+ omitEmptyFieldsIdx: omitEmptyIdx,
+ }
+
+ encodingStructTypeCache.Store(t, structType)
+ return structType, structType.err
+}
+
+func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) {
+ for i := 0; i < len(flds); i++ {
+ // Get field's encodeFunc
+ flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ)
+ if flds[i].ef == nil {
+ structType := &encodingStructType{err: &UnsupportedTypeError{t}}
+ encodingStructTypeCache.Store(t, structType)
+ return structType, structType.err
+ }
+ }
+
+ structType := &encodingStructType{
+ fields: flds,
+ toArray: true,
+ }
+ encodingStructTypeCache.Store(t, structType)
+ return structType, structType.err
+}
+
+func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc, isZeroFunc) {
+ if v, _ := encodeFuncCache.Load(t); v != nil {
+ fs := v.(encodeFuncs)
+ return fs.ef, fs.ief, fs.izf
+ }
+ ef, ief, izf := getEncodeFuncInternal(t)
+ encodeFuncCache.Store(t, encodeFuncs{ef, ief, izf})
+ return ef, ief, izf
+}
+
+func getTypeInfo(t reflect.Type) *typeInfo {
+ if v, _ := typeInfoCache.Load(t); v != nil {
+ return v.(*typeInfo)
+ }
+ tInfo := newTypeInfo(t)
+ typeInfoCache.Store(t, tInfo)
+ return tInfo
+}
+
+func hasToArrayOption(tag string) bool {
+ s := ",toarray"
+ idx := strings.Index(tag, s)
+ return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',')
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/common.go b/vendor/github.com/fxamacker/cbor/v2/common.go
new file mode 100644
index 0000000000..9cf33cd209
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/common.go
@@ -0,0 +1,191 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+)
+
+type cborType uint8
+
+const (
+ cborTypePositiveInt cborType = 0x00
+ cborTypeNegativeInt cborType = 0x20
+ cborTypeByteString cborType = 0x40
+ cborTypeTextString cborType = 0x60
+ cborTypeArray cborType = 0x80
+ cborTypeMap cborType = 0xa0
+ cborTypeTag cborType = 0xc0
+ cborTypePrimitives cborType = 0xe0
+)
+
+func (t cborType) String() string {
+ switch t {
+ case cborTypePositiveInt:
+ return "positive integer"
+ case cborTypeNegativeInt:
+ return "negative integer"
+ case cborTypeByteString:
+ return "byte string"
+ case cborTypeTextString:
+ return "UTF-8 text string"
+ case cborTypeArray:
+ return "array"
+ case cborTypeMap:
+ return "map"
+ case cborTypeTag:
+ return "tag"
+ case cborTypePrimitives:
+ return "primitives"
+ default:
+ return "Invalid type " + strconv.Itoa(int(t))
+ }
+}
+
+type additionalInformation uint8
+
+const (
+ maxAdditionalInformationWithoutArgument = 23
+ additionalInformationWith1ByteArgument = 24
+ additionalInformationWith2ByteArgument = 25
+ additionalInformationWith4ByteArgument = 26
+ additionalInformationWith8ByteArgument = 27
+
+ // For major type 7.
+ additionalInformationAsFalse = 20
+ additionalInformationAsTrue = 21
+ additionalInformationAsNull = 22
+ additionalInformationAsUndefined = 23
+ additionalInformationAsFloat16 = 25
+ additionalInformationAsFloat32 = 26
+ additionalInformationAsFloat64 = 27
+
+ // For major type 2, 3, 4, 5.
+ additionalInformationAsIndefiniteLengthFlag = 31
+)
+
+const (
+ maxSimpleValueInAdditionalInformation = 23
+ minSimpleValueIn1ByteArgument = 32
+)
+
+func (ai additionalInformation) isIndefiniteLength() bool {
+ return ai == additionalInformationAsIndefiniteLengthFlag
+}
+
+const (
+ // From RFC 8949 Section 3:
+ // "The initial byte of each encoded data item contains both information about the major type
+ // (the high-order 3 bits, described in Section 3.1) and additional information
+ // (the low-order 5 bits)."
+
+ // typeMask is used to extract major type in initial byte of encoded data item.
+ typeMask = 0xe0
+
+ // additionalInformationMask is used to extract additional information in initial byte of encoded data item.
+ additionalInformationMask = 0x1f
+)
+
+func getType(raw byte) cborType {
+ return cborType(raw & typeMask)
+}
+
+func getAdditionalInformation(raw byte) byte {
+ return raw & additionalInformationMask
+}
+
+func isBreakFlag(raw byte) bool {
+ return raw == cborBreakFlag
+}
+
+func parseInitialByte(b byte) (t cborType, ai byte) {
+ return getType(b), getAdditionalInformation(b)
+}
+
+const (
+ tagNumRFC3339Time = 0
+ tagNumEpochTime = 1
+ tagNumUnsignedBignum = 2
+ tagNumNegativeBignum = 3
+ tagNumExpectedLaterEncodingBase64URL = 21
+ tagNumExpectedLaterEncodingBase64 = 22
+ tagNumExpectedLaterEncodingBase16 = 23
+ tagNumSelfDescribedCBOR = 55799
+)
+
+const (
+ cborBreakFlag = byte(0xff)
+ cborByteStringWithIndefiniteLengthHead = byte(0x5f)
+ cborTextStringWithIndefiniteLengthHead = byte(0x7f)
+ cborArrayWithIndefiniteLengthHead = byte(0x9f)
+ cborMapWithIndefiniteLengthHead = byte(0xbf)
+)
+
+var (
+ cborFalse = []byte{0xf4}
+ cborTrue = []byte{0xf5}
+ cborNil = []byte{0xf6}
+ cborNaN = []byte{0xf9, 0x7e, 0x00}
+ cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00}
+ cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00}
+)
+
+// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types.
+func validBuiltinTag(tagNum uint64, contentHead byte) error {
+ t := getType(contentHead)
+ switch tagNum {
+ case tagNumRFC3339Time:
+ // Tag content (date/time text string in RFC 3339 format) must be string type.
+ if t != cborTypeTextString {
+ return newInadmissibleTagContentTypeError(
+ tagNumRFC3339Time,
+ "text string",
+ t.String())
+ }
+ return nil
+
+ case tagNumEpochTime:
+ // Tag content (epoch date/time) must be uint, int, or float type.
+ if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) {
+ return newInadmissibleTagContentTypeError(
+ tagNumEpochTime,
+ "integer or floating-point number",
+ t.String())
+ }
+ return nil
+
+ case tagNumUnsignedBignum, tagNumNegativeBignum:
+ // Tag content (bignum) must be byte type.
+ if t != cborTypeByteString {
+ return newInadmissibleTagContentTypeErrorf(
+ fmt.Sprintf(
+ "tag number %d or %d must be followed by byte string, got %s",
+ tagNumUnsignedBignum,
+ tagNumNegativeBignum,
+ t.String(),
+ ))
+ }
+ return nil
+
+ case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16:
+ // From RFC 8949 3.4.5.2:
+ // The data item tagged can be a byte string or any other data item. In the latter
+ // case, the tag applies to all of the byte string data items contained in the data
+ // item, except for those contained in a nested data item tagged with an expected
+ // conversion.
+ return nil
+ }
+
+ return nil
+}
+
+// Transcoder is a scheme for transcoding a single CBOR encoded data item to or from a different
+// data format.
+type Transcoder interface {
+ // Transcode reads the data item in its source format from a Reader and writes a
+ // corresponding representation in its destination format to a Writer.
+ Transcode(dst io.Writer, src io.Reader) error
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/decode.go b/vendor/github.com/fxamacker/cbor/v2/decode.go
new file mode 100644
index 0000000000..f0bdc3b38d
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/decode.go
@@ -0,0 +1,3318 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/x448/float16"
+)
+
+// Unmarshal parses the CBOR-encoded data into the value pointed to by v
+// using default decoding options. If v is nil, not a pointer, or
+// a nil pointer, Unmarshal returns an error.
+//
+// To unmarshal CBOR into a value implementing the Unmarshaler interface,
+// Unmarshal calls that value's UnmarshalCBOR method with a valid
+// CBOR value.
+//
+// To unmarshal CBOR byte string into a value implementing the
+// encoding.BinaryUnmarshaler interface, Unmarshal calls that value's
+// UnmarshalBinary method with decoded CBOR byte string.
+//
+// To unmarshal CBOR into a pointer, Unmarshal sets the pointer to nil
+// if CBOR data is null (0xf6) or undefined (0xf7). Otherwise, Unmarshal
+// unmarshals CBOR into the value pointed to by the pointer. If the
+// pointer is nil, Unmarshal creates a new value for it to point to.
+//
+// To unmarshal CBOR into an empty interface value, Unmarshal uses the
+// following rules:
+//
+// CBOR booleans decode to bool.
+// CBOR positive integers decode to uint64.
+// CBOR negative integers decode to int64 (big.Int if value overflows).
+// CBOR floating points decode to float64.
+// CBOR byte strings decode to []byte.
+// CBOR text strings decode to string.
+// CBOR arrays decode to []interface{}.
+// CBOR maps decode to map[interface{}]interface{}.
+// CBOR null and undefined values decode to nil.
+// CBOR times (tag 0 and 1) decode to time.Time.
+// CBOR bignums (tag 2 and 3) decode to big.Int.
+// CBOR tags with an unrecognized number decode to cbor.Tag
+//
+// To unmarshal a CBOR array into a slice, Unmarshal allocates a new slice
+// if the CBOR array is empty or slice capacity is less than CBOR array length.
+// Otherwise Unmarshal overwrites existing elements, and sets slice length
+// to CBOR array length.
+//
+// To unmarshal a CBOR array into a Go array, Unmarshal decodes CBOR array
+// elements into Go array elements. If the Go array is smaller than the
+// CBOR array, the extra CBOR array elements are discarded. If the CBOR
+// array is smaller than the Go array, the extra Go array elements are
+// set to zero values.
+//
+// To unmarshal a CBOR array into a struct, struct must have a special field "_"
+// with struct tag `cbor:",toarray"`. Go array elements are decoded into struct
+// fields. Any "omitempty" struct field tag option is ignored in this case.
+//
+// To unmarshal a CBOR map into a map, Unmarshal allocates a new map only if the
+// map is nil. Otherwise Unmarshal reuses the existing map and keeps existing
+// entries. Unmarshal stores key-value pairs from the CBOR map into Go map.
+// See DecOptions.DupMapKey to enable duplicate map key detection.
+//
+// To unmarshal a CBOR map into a struct, Unmarshal matches CBOR map keys to the
+// keys in the following priority:
+//
+// 1. "cbor" key in struct field tag,
+// 2. "json" key in struct field tag,
+// 3. struct field name.
+//
+// Unmarshal tries an exact match for field name, then a case-insensitive match.
+// Map key-value pairs without corresponding struct fields are ignored. See
+// DecOptions.ExtraReturnErrors to return error at unknown field.
+//
+// To unmarshal a CBOR text string into a time.Time value, Unmarshal parses text
+// string formatted in RFC3339. To unmarshal a CBOR integer/float into a
+// time.Time value, Unmarshal creates an unix time with integer/float as seconds
+// and fractional seconds since January 1, 1970 UTC. As a special case, Infinite
+// and NaN float values decode to time.Time's zero value.
+//
+// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a
+// slice/map/pointer, Unmarshal sets Go value to nil. Because null is often
+// used to mean "not present", unmarshaling CBOR null and undefined value
+// into any other Go type has no effect and returns no error.
+//
+// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time),
+// and tag 2 and 3 (bignum).
+//
+// Unmarshal returns ExtraneousDataError error (without decoding into v)
+// if there are any remaining bytes following the first valid CBOR data item.
+// See UnmarshalFirst, if you want to unmarshal only the first
+// CBOR data item without ExtraneousDataError caused by remaining bytes.
+func Unmarshal(data []byte, v any) error {
+ return defaultDecMode.Unmarshal(data, v)
+}
+
+// UnmarshalFirst parses the first CBOR data item into the value pointed to by v
+// using default decoding options. Any remaining bytes are returned in rest.
+//
+// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
+//
+// See the documentation for Unmarshal for details.
+func UnmarshalFirst(data []byte, v any) (rest []byte, err error) {
+ return defaultDecMode.UnmarshalFirst(data, v)
+}
+
+// Valid checks whether data is a well-formed encoded CBOR data item and
+// that it complies with default restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+//
+// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity)
+// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed".
+//
+// Deprecated: Valid is kept for compatibility and should not be used.
+// Use Wellformed instead because it has a more appropriate name.
+func Valid(data []byte) error {
+ return defaultDecMode.Valid(data)
+}
+
+// Wellformed checks whether data is a well-formed encoded CBOR data item and
+// that it complies with default restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+func Wellformed(data []byte) error {
+ return defaultDecMode.Wellformed(data)
+}
+
+// Unmarshaler is the interface implemented by types that wish to unmarshal
+// CBOR data themselves. The input is a valid CBOR value. UnmarshalCBOR
+// must copy the CBOR data if it needs to use it after returning.
+type Unmarshaler interface {
+ UnmarshalCBOR([]byte) error
+}
+
+type unmarshaler interface {
+ unmarshalCBOR([]byte) error
+}
+
+// InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+type InvalidUnmarshalError struct {
+ s string
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ return e.s
+}
+
+// UnmarshalTypeError describes a CBOR value that can't be decoded to a Go type.
+type UnmarshalTypeError struct {
+ CBORType string // type of CBOR value
+ GoType string // type of Go value it could not be decoded into
+ StructFieldName string // name of the struct field holding the Go value (optional)
+ errorMsg string // additional error message (optional)
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ var s string
+ if e.StructFieldName != "" {
+ s = "cbor: cannot unmarshal " + e.CBORType + " into Go struct field " + e.StructFieldName + " of type " + e.GoType
+ } else {
+ s = "cbor: cannot unmarshal " + e.CBORType + " into Go value of type " + e.GoType
+ }
+ if e.errorMsg != "" {
+ s += " (" + e.errorMsg + ")"
+ }
+ return s
+}
+
+// InvalidMapKeyTypeError describes invalid Go map key type when decoding CBOR map.
+// For example, Go doesn't allow slice as map key.
+type InvalidMapKeyTypeError struct {
+ GoType string
+}
+
+func (e *InvalidMapKeyTypeError) Error() string {
+ return "cbor: invalid map key type: " + e.GoType
+}
+
+// DupMapKeyError describes detected duplicate map key in CBOR map.
+type DupMapKeyError struct {
+ Key any
+ Index int
+}
+
+func (e *DupMapKeyError) Error() string {
+ return fmt.Sprintf("cbor: found duplicate map key %#v at map element index %d", e.Key, e.Index)
+}
+
+// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct.
+type UnknownFieldError struct {
+ Index int
+}
+
+func (e *UnknownFieldError) Error() string {
+ return fmt.Sprintf("cbor: found unknown field at map element index %d", e.Index)
+}
+
// UnacceptableDataItemError is returned when unmarshaling a CBOR input that contains a data item
// that is not acceptable to a specific CBOR-based application protocol ("invalid or unexpected" as
// described in RFC 8949 Section 5 Paragraph 3).
type UnacceptableDataItemError struct {
	CBORType string
	Message  string
}

// Error reports the rejected CBOR type and the protocol-specific reason.
func (e UnacceptableDataItemError) Error() string {
	const format = "cbor: data item of cbor type %s is not accepted by protocol: %s"
	return fmt.Sprintf(format, e.CBORType, e.Message)
}
+
+// ByteStringExpectedFormatError is returned when unmarshaling CBOR byte string fails when
+// using non-default ByteStringExpectedFormat decoding option that makes decoder expect
+// a specified format such as base64, hex, etc.
+type ByteStringExpectedFormatError struct {
+ expectedFormatOption ByteStringExpectedFormatMode
+ err error
+}
+
+func newByteStringExpectedFormatError(expectedFormatOption ByteStringExpectedFormatMode, err error) *ByteStringExpectedFormatError {
+ return &ByteStringExpectedFormatError{expectedFormatOption, err}
+}
+
+func (e *ByteStringExpectedFormatError) Error() string {
+ switch e.expectedFormatOption {
+ case ByteStringExpectedBase64URL:
+ return fmt.Sprintf("cbor: failed to decode base64url from byte string: %s", e.err)
+
+ case ByteStringExpectedBase64:
+ return fmt.Sprintf("cbor: failed to decode base64 from byte string: %s", e.err)
+
+ case ByteStringExpectedBase16:
+ return fmt.Sprintf("cbor: failed to decode hex from byte string: %s", e.err)
+
+ default:
+ return fmt.Sprintf("cbor: failed to decode byte string in expected format %d: %s", e.expectedFormatOption, e.err)
+ }
+}
+
+func (e *ByteStringExpectedFormatError) Unwrap() error {
+ return e.err
+}
+
// InadmissibleTagContentTypeError is returned when unmarshaling built-in CBOR tags
// fails because of inadmissible type for tag content. Currently, the built-in
// CBOR tags in this codec are tags 0-3 and 21-23.
// See "Tag validity" in RFC 8949 Section 5.3.2.
type InadmissibleTagContentTypeError struct {
	s                      string // pre-formatted message; when non-empty it wins over the fields below
	tagNum                 int
	expectedTagContentType string
	gotTagContentType      string
}

// newInadmissibleTagContentTypeError builds the structured form of the error,
// describing which content type tag number tagNum requires versus what was seen.
func newInadmissibleTagContentTypeError(
	tagNum int,
	expectedTagContentType string,
	gotTagContentType string,
) *InadmissibleTagContentTypeError {
	return &InadmissibleTagContentTypeError{
		tagNum:                 tagNum,
		expectedTagContentType: expectedTagContentType,
		gotTagContentType:      gotTagContentType,
	}
}

// newInadmissibleTagContentTypeErrorf builds the free-form variant from a bare
// message, prefixing the package name.
func newInadmissibleTagContentTypeErrorf(s string) *InadmissibleTagContentTypeError {
	return &InadmissibleTagContentTypeError{s: "cbor: " + s} //nolint:goconst // ignore "cbor"
}

// Error returns the pre-formatted message when present, otherwise formats the
// structured tag-number/expected/got fields.
func (e *InadmissibleTagContentTypeError) Error() string {
	if e.s != "" {
		return e.s
	}
	return fmt.Sprintf(
		"cbor: tag number %d must be followed by %s, got %s",
		e.tagNum,
		e.expectedTagContentType,
		e.gotTagContentType,
	)
}
+
// DupMapKeyMode specifies how to enforce duplicate map key. Two map keys are considered duplicates if:
//  1. When decoding into a struct, both keys match the same struct field. The keys are also
//     considered duplicates if neither matches any field and decoding to interface{} would produce
//     equal (==) values for both keys.
//  2. When decoding into a map, both keys are equal (==) when decoded into values of the
//     destination map's key type.
type DupMapKeyMode int

const (
	// DupMapKeyQuiet doesn't enforce duplicate map key. Decoder quietly (no error)
	// uses faster of "keep first" or "keep last" depending on Go data type and other factors.
	DupMapKeyQuiet DupMapKeyMode = iota

	// DupMapKeyEnforcedAPF enforces detection and rejection of duplicate map keys.
	// APF means "Allow Partial Fill" and the destination map or struct can be partially filled.
	// If a duplicate map key is detected, DupMapKeyError is returned without further decoding
	// of the map. It's the caller's responsibility to respond to DupMapKeyError by
	// discarding the partially filled result if their protocol requires it.
	// WARNING: using DupMapKeyEnforcedAPF will decrease performance and increase memory use.
	DupMapKeyEnforcedAPF

	maxDupMapKeyMode // unexported sentinel: one past the last valid mode
)

// valid reports whether dmkm is one of the defined DupMapKeyMode values.
func (dmkm DupMapKeyMode) valid() bool {
	if dmkm < 0 || dmkm >= maxDupMapKeyMode {
		return false
	}
	return true
}
+
// IndefLengthMode specifies whether to allow indefinite length items.
type IndefLengthMode int

const (
	// IndefLengthAllowed allows indefinite length items.
	IndefLengthAllowed IndefLengthMode = iota

	// IndefLengthForbidden disallows indefinite length items.
	IndefLengthForbidden

	maxIndefLengthMode // unexported sentinel: one past the last valid mode
)

// valid reports whether m is one of the defined IndefLengthMode values.
func (m IndefLengthMode) valid() bool {
	if m < 0 || m >= maxIndefLengthMode {
		return false
	}
	return true
}
+
// TagsMode specifies whether to allow CBOR tags.
type TagsMode int

const (
	// TagsAllowed allows CBOR tags.
	TagsAllowed TagsMode = iota

	// TagsForbidden disallows CBOR tags.
	TagsForbidden

	maxTagsMode // unexported sentinel: one past the last valid mode
)

// valid reports whether tm is one of the defined TagsMode values.
func (tm TagsMode) valid() bool {
	if tm < 0 || tm >= maxTagsMode {
		return false
	}
	return true
}
+
// IntDecMode specifies which Go type (int64, uint64, or big.Int) should
// be used when decoding CBOR integers (major type 0 and 1) to Go interface{}.
type IntDecMode int

const (
	// IntDecConvertNone affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
	// It decodes CBOR unsigned integer (major type 0) to:
	//   - uint64
	// It decodes CBOR negative integer (major type 1) to:
	//   - int64 if value fits
	//   - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
	IntDecConvertNone IntDecMode = iota

	// IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
	// It decodes CBOR integers (major type 0 and 1) to:
	//   - int64 if value fits
	//   - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64
	//   - return UnmarshalTypeError if value > math.MaxInt64
	//
	// Deprecated: IntDecConvertSigned should not be used.
	// Please use other options, such as IntDecConvertSignedOrError, IntDecConvertSignedOrBigInt, IntDecConvertNone.
	IntDecConvertSigned

	// IntDecConvertSignedOrFail affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
	// It decodes CBOR integers (major type 0 and 1) to:
	//   - int64 if value fits
	//   - return UnmarshalTypeError if value doesn't fit into int64
	IntDecConvertSignedOrFail

	// IntDecConvertSignedOrBigInt affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
	// It makes CBOR integers (major type 0 and 1) decode to:
	//   - int64 if value fits
	//   - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
	IntDecConvertSignedOrBigInt

	maxIntDec // unexported sentinel: one past the last valid mode
)

// valid reports whether idm is one of the defined IntDecMode values.
func (idm IntDecMode) valid() bool {
	if idm < 0 || idm >= maxIntDec {
		return false
	}
	return true
}
+
// MapKeyByteStringMode specifies how to decode CBOR byte string (major type 2)
// as Go map key when decoding CBOR map key into an empty Go interface value.
// Specifically, this option applies when decoding CBOR map into
//   - Go empty interface, or
//   - Go map with empty interface as key type.
//
// The CBOR map key types handled by this option are
//   - byte string
//   - tagged byte string
//   - nested tagged byte string
type MapKeyByteStringMode int

const (
	// MapKeyByteStringAllowed allows CBOR byte string to be decoded as Go map key.
	// Since Go doesn't allow []byte as map key, CBOR byte string is decoded to
	// ByteString which has underlying string type.
	// This is the default setting.
	MapKeyByteStringAllowed MapKeyByteStringMode = iota

	// MapKeyByteStringForbidden forbids CBOR byte string being decoded as Go map key.
	// Attempting to decode CBOR byte string as map key into empty interface value
	// returns a decoding error.
	MapKeyByteStringForbidden

	maxMapKeyByteStringMode // unexported sentinel: one past the last valid mode
)

// valid reports whether mkbsm is one of the defined MapKeyByteStringMode values.
func (mkbsm MapKeyByteStringMode) valid() bool {
	if mkbsm < 0 || mkbsm >= maxMapKeyByteStringMode {
		return false
	}
	return true
}
+
// ExtraDecErrorCond specifies extra conditions that should be treated as errors.
// It is a bit set; conditions combine with bitwise OR.
type ExtraDecErrorCond uint

// ExtraDecErrorNone indicates no extra error condition.
const ExtraDecErrorNone ExtraDecErrorCond = 0

const (
	// ExtraDecErrorUnknownField indicates error condition when destination
	// Go struct doesn't have a field matching a CBOR map key.
	ExtraDecErrorUnknownField ExtraDecErrorCond = 1 << iota

	maxExtraDecError // unexported sentinel: first unused bit
)

// valid reports whether ec contains only defined condition bits.
func (ec ExtraDecErrorCond) valid() bool {
	// ec is unsigned, so only the upper bound needs checking.
	if ec >= maxExtraDecError {
		return false
	}
	return true
}
+
// UTF8Mode option specifies if decoder should
// decode CBOR Text containing invalid UTF-8 string.
type UTF8Mode int

const (
	// UTF8RejectInvalid rejects CBOR Text containing
	// invalid UTF-8 string.
	UTF8RejectInvalid UTF8Mode = iota

	// UTF8DecodeInvalid allows decoding CBOR Text containing
	// invalid UTF-8 string.
	UTF8DecodeInvalid

	maxUTF8Mode // unexported sentinel: one past the last valid mode
)

// valid reports whether um is one of the defined UTF8Mode values.
func (um UTF8Mode) valid() bool {
	if um < 0 || um >= maxUTF8Mode {
		return false
	}
	return true
}
+
// FieldNameMatchingMode specifies how string keys in CBOR maps are matched to Go struct field names.
type FieldNameMatchingMode int

const (
	// FieldNameMatchingPreferCaseSensitive prefers to decode map items into struct fields whose names (or tag
	// names) exactly match the item's key. If there is no such field, a map item will be decoded into a field whose
	// name is a case-insensitive match for the item's key.
	FieldNameMatchingPreferCaseSensitive FieldNameMatchingMode = iota

	// FieldNameMatchingCaseSensitive decodes map items only into a struct field whose name (or tag name) is an
	// exact match for the item's key.
	FieldNameMatchingCaseSensitive

	maxFieldNameMatchingMode // unexported sentinel: one past the last valid mode
)

// valid reports whether fnmm is one of the defined FieldNameMatchingMode values.
func (fnmm FieldNameMatchingMode) valid() bool {
	if fnmm < 0 || fnmm >= maxFieldNameMatchingMode {
		return false
	}
	return true
}
+
// BigIntDecMode specifies how to decode CBOR bignum to Go interface{}.
type BigIntDecMode int

const (
	// BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int)
	// when unmarshaling into a Go interface{}.
	BigIntDecodeValue BigIntDecMode = iota

	// BigIntDecodePointer makes CBOR bignum decode to *big.Int when
	// unmarshaling into a Go interface{}.
	BigIntDecodePointer

	maxBigIntDecMode // unexported sentinel: one past the last valid mode
)

// valid reports whether bidm is one of the defined BigIntDecMode values.
func (bidm BigIntDecMode) valid() bool {
	if bidm < 0 || bidm >= maxBigIntDecMode {
		return false
	}
	return true
}
+
// ByteStringToStringMode specifies the behavior when decoding a CBOR byte string into a Go string.
type ByteStringToStringMode int

const (
	// ByteStringToStringForbidden generates an error on an attempt to decode a CBOR byte string into a Go string.
	ByteStringToStringForbidden ByteStringToStringMode = iota

	// ByteStringToStringAllowed permits decoding a CBOR byte string into a Go string.
	ByteStringToStringAllowed

	// ByteStringToStringAllowedWithExpectedLaterEncoding permits decoding a CBOR byte string
	// into a Go string. Also, if the byte string is enclosed (directly or indirectly) by one of
	// the "expected later encoding" tags (numbers 21 through 23), the destination string will
	// be populated by applying the designated text encoding to the contents of the input byte
	// string.
	ByteStringToStringAllowedWithExpectedLaterEncoding

	maxByteStringToStringMode // unexported sentinel: one past the last valid mode
)

// valid reports whether bstsm is one of the defined ByteStringToStringMode values.
func (bstsm ByteStringToStringMode) valid() bool {
	if bstsm < 0 || bstsm >= maxByteStringToStringMode {
		return false
	}
	return true
}
+
// FieldNameByteStringMode specifies the behavior when decoding a CBOR byte string map key as a Go struct field name.
type FieldNameByteStringMode int

const (
	// FieldNameByteStringForbidden generates an error on an attempt to decode a CBOR byte string map key as a Go struct field name.
	FieldNameByteStringForbidden FieldNameByteStringMode = iota

	// FieldNameByteStringAllowed permits CBOR byte string map keys to be recognized as Go struct field names.
	FieldNameByteStringAllowed

	maxFieldNameByteStringMode // unexported sentinel: one past the last valid mode
)

// valid reports whether fnbsm is one of the defined FieldNameByteStringMode values.
func (fnbsm FieldNameByteStringMode) valid() bool {
	if fnbsm < 0 || fnbsm >= maxFieldNameByteStringMode {
		return false
	}
	return true
}
+
// UnrecognizedTagToAnyMode specifies how to decode unrecognized CBOR tag into an empty interface (any).
// Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet.
type UnrecognizedTagToAnyMode int

const (
	// UnrecognizedTagNumAndContentToAny decodes CBOR tag number and tag content to cbor.Tag
	// when decoding unrecognized CBOR tag into an empty interface.
	UnrecognizedTagNumAndContentToAny UnrecognizedTagToAnyMode = iota

	// UnrecognizedTagContentToAny decodes only CBOR tag content (into its default type)
	// when decoding unrecognized CBOR tag into an empty interface.
	UnrecognizedTagContentToAny

	maxUnrecognizedTagToAny // unexported sentinel: one past the last valid mode
)

// valid reports whether uttam is one of the defined UnrecognizedTagToAnyMode values.
func (uttam UnrecognizedTagToAnyMode) valid() bool {
	if uttam < 0 || uttam >= maxUnrecognizedTagToAny {
		return false
	}
	return true
}
+
// TimeTagToAnyMode specifies how to decode CBOR tag 0 and 1 into an empty interface (any).
// Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format.
type TimeTagToAnyMode int

const (
	// TimeTagToTime decodes CBOR tag 0 and 1 into a time.Time value
	// when decoding tag 0 or 1 into an empty interface.
	TimeTagToTime TimeTagToAnyMode = iota

	// TimeTagToRFC3339 decodes CBOR tag 0 and 1 into a time string in RFC3339 format
	// when decoding tag 0 or 1 into an empty interface.
	TimeTagToRFC3339

	// TimeTagToRFC3339Nano decodes CBOR tag 0 and 1 into a time string in RFC3339Nano format
	// when decoding tag 0 or 1 into an empty interface.
	TimeTagToRFC3339Nano

	maxTimeTagToAnyMode // unexported sentinel: one past the last valid mode
)

// valid reports whether tttam is one of the defined TimeTagToAnyMode values.
func (tttam TimeTagToAnyMode) valid() bool {
	if tttam < 0 || tttam >= maxTimeTagToAnyMode {
		return false
	}
	return true
}
+
+// SimpleValueRegistry is a registry of unmarshaling behaviors for each possible CBOR simple value
+// number (0...23 and 32...255).
+type SimpleValueRegistry struct {
+ rejected [256]bool
+}
+
+// WithRejectedSimpleValue registers the given simple value as rejected. If the simple value is
+// encountered in a CBOR input during unmarshaling, an UnacceptableDataItemError is returned.
+func WithRejectedSimpleValue(sv SimpleValue) func(*SimpleValueRegistry) error {
+ return func(r *SimpleValueRegistry) error {
+ if sv >= 24 && sv <= 31 {
+ return fmt.Errorf("cbor: cannot set analog for reserved simple value %d", sv)
+ }
+ r.rejected[sv] = true
+ return nil
+ }
+}
+
+// Creates a new SimpleValueRegistry. The registry state is initialized by executing the provided
+// functions in order against a registry that is pre-populated with the defaults for all well-formed
+// simple value numbers.
+func NewSimpleValueRegistryFromDefaults(fns ...func(*SimpleValueRegistry) error) (*SimpleValueRegistry, error) {
+ var r SimpleValueRegistry
+ for _, fn := range fns {
+ if err := fn(&r); err != nil {
+ return nil, err
+ }
+ }
+ return &r, nil
+}
+
// NaNMode specifies how to decode floating-point values (major type 7, additional information 25
// through 27) representing NaN (not-a-number).
type NaNMode int

const (
	// NaNDecodeAllowed will decode NaN values to Go float32 or float64.
	NaNDecodeAllowed NaNMode = iota

	// NaNDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode a NaN value.
	NaNDecodeForbidden

	maxNaNDecode // unexported sentinel: one past the last valid mode
)

// valid reports whether ndm is one of the defined NaNMode values.
func (ndm NaNMode) valid() bool {
	if ndm < 0 || ndm >= maxNaNDecode {
		return false
	}
	return true
}
+
// InfMode specifies how to decode floating-point values (major type 7, additional information 25
// through 27) representing positive or negative infinity.
type InfMode int

const (
	// InfDecodeAllowed will decode infinite values to Go float32 or float64.
	InfDecodeAllowed InfMode = iota

	// InfDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode an
	// infinite value.
	InfDecodeForbidden

	maxInfDecode // unexported sentinel: one past the last valid mode
)

// valid reports whether idm is one of the defined InfMode values.
func (idm InfMode) valid() bool {
	if idm < 0 || idm >= maxInfDecode {
		return false
	}
	return true
}
+
// ByteStringToTimeMode specifies the behavior when decoding a CBOR byte string into a Go time.Time.
type ByteStringToTimeMode int

const (
	// ByteStringToTimeForbidden generates an error on an attempt to decode a CBOR byte string into a Go time.Time.
	ByteStringToTimeForbidden ByteStringToTimeMode = iota

	// ByteStringToTimeAllowed permits decoding a CBOR byte string into a Go time.Time.
	ByteStringToTimeAllowed

	maxByteStringToTimeMode // unexported sentinel: one past the last valid mode
)

// valid reports whether bttm is one of the defined ByteStringToTimeMode values.
func (bttm ByteStringToTimeMode) valid() bool {
	if bttm < 0 || bttm >= maxByteStringToTimeMode {
		return false
	}
	return true
}
+
// ByteStringExpectedFormatMode specifies how to decode CBOR byte string into Go byte slice
// when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if
// the CBOR byte string does not contain the expected format (e.g. base64) specified.
// For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters"
// in RFC 8949 Section 3.4.5.2.
type ByteStringExpectedFormatMode int

const (
	// ByteStringExpectedFormatNone copies the unmodified CBOR byte string into Go byte slice
	// if the byte string is not tagged by CBOR tag 21-23.
	ByteStringExpectedFormatNone ByteStringExpectedFormatMode = iota

	// ByteStringExpectedBase64URL expects CBOR byte strings to contain base64url-encoded bytes
	// if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode
	// the base64url-encoded bytes into Go slice.
	ByteStringExpectedBase64URL

	// ByteStringExpectedBase64 expects CBOR byte strings to contain base64-encoded bytes
	// if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode
	// the base64-encoded bytes into Go slice.
	ByteStringExpectedBase64

	// ByteStringExpectedBase16 expects CBOR byte strings to contain base16-encoded bytes
	// if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode
	// the base16-encoded bytes into Go slice.
	ByteStringExpectedBase16

	maxByteStringExpectedFormatMode // unexported sentinel: one past the last valid mode
)

// valid reports whether bsefm is one of the defined ByteStringExpectedFormatMode values.
func (bsefm ByteStringExpectedFormatMode) valid() bool {
	if bsefm < 0 || bsefm >= maxByteStringExpectedFormatMode {
		return false
	}
	return true
}
+
// BignumTagMode specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can be
// decoded.
type BignumTagMode int

const (
	// BignumTagAllowed allows bignum tags to be decoded.
	BignumTagAllowed BignumTagMode = iota

	// BignumTagForbidden produces an UnacceptableDataItemError during Unmarshal if a bignum tag
	// is encountered in the input.
	BignumTagForbidden

	maxBignumTag // unexported sentinel: one past the last valid mode
)

// valid reports whether btm is one of the defined BignumTagMode values.
func (btm BignumTagMode) valid() bool {
	if btm < 0 || btm >= maxBignumTag {
		return false
	}
	return true
}
+
// BinaryUnmarshalerMode specifies how to decode into types that implement
// encoding.BinaryUnmarshaler.
type BinaryUnmarshalerMode int

const (
	// BinaryUnmarshalerByteString will invoke UnmarshalBinary on the contents of a CBOR byte
	// string when decoding into a value that implements BinaryUnmarshaler.
	BinaryUnmarshalerByteString BinaryUnmarshalerMode = iota

	// BinaryUnmarshalerNone does not recognize BinaryUnmarshaler implementations during decode.
	BinaryUnmarshalerNone

	maxBinaryUnmarshalerMode // unexported sentinel: one past the last valid mode
)

// valid reports whether bum is one of the defined BinaryUnmarshalerMode values.
func (bum BinaryUnmarshalerMode) valid() bool {
	if bum < 0 || bum >= maxBinaryUnmarshalerMode {
		return false
	}
	return true
}
+
// TextUnmarshalerMode specifies how to decode into types that implement
// encoding.TextUnmarshaler.
type TextUnmarshalerMode int

const (
	// TextUnmarshalerNone does not recognize TextUnmarshaler implementations during decode.
	TextUnmarshalerNone TextUnmarshalerMode = iota

	// TextUnmarshalerTextString will invoke UnmarshalText on the contents of a CBOR text
	// string when decoding into a value that implements TextUnmarshaler.
	TextUnmarshalerTextString

	maxTextUnmarshalerMode // unexported sentinel: one past the last valid mode
)

// valid reports whether tum is one of the defined TextUnmarshalerMode values.
func (tum TextUnmarshalerMode) valid() bool {
	if tum < 0 || tum >= maxTextUnmarshalerMode {
		return false
	}
	return true
}
+
// DecOptions specifies decoding options.
// The zero value selects the first (iota == 0) constant of each mode type;
// see the individual mode declarations for what each default means.
type DecOptions struct {
	// DupMapKey specifies whether to enforce duplicate map key.
	// Zero value is DupMapKeyQuiet (no enforcement).
	DupMapKey DupMapKeyMode

	// TimeTag specifies whether or not untagged data items, or tags other
	// than tag 0 and tag 1, can be decoded to time.Time. If tag 0 or tag 1
	// appears in an input, the type of its content is always validated as
	// specified in RFC 8949. That behavior is not controlled by this
	// option. The behavior of the supported modes is:
	//
	// DecTagIgnored (default): Untagged text strings and text strings
	// enclosed in tags other than 0 and 1 are decoded as though enclosed
	// in tag 0. Untagged unsigned integers, negative integers, and
	// floating-point numbers (or those enclosed in tags other than 0 and
	// 1) are decoded as though enclosed in tag 1. Decoding a tag other
	// than 0 or 1 enclosing simple values null or undefined into a
	// time.Time does not modify the destination value.
	//
	// DecTagOptional: Untagged text strings are decoded as though
	// enclosed in tag 0. Untagged unsigned integers, negative integers,
	// and floating-point numbers are decoded as though enclosed in tag
	// 1. Tags other than 0 and 1 will produce an error on attempts to
	// decode them into a time.Time.
	//
	// DecTagRequired: Only tags 0 and 1 can be decoded to time.Time. Any
	// other input will produce an error.
	TimeTag DecTagMode

	// MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags.
	// Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can
	// require larger amounts of stack to deserialize. Don't increase this higher than you require.
	// A value of 0 selects the default.
	MaxNestedLevels int

	// MaxArrayElements specifies the max number of elements for CBOR arrays.
	// Default is 128*1024=131072 and it can be set to [16, 2147483647].
	// A value of 0 selects the default.
	MaxArrayElements int

	// MaxMapPairs specifies the max number of key-value pairs for CBOR maps.
	// Default is 128*1024=131072 and it can be set to [16, 2147483647].
	// A value of 0 selects the default.
	MaxMapPairs int

	// IndefLength specifies whether to allow indefinite length CBOR items.
	IndefLength IndefLengthMode

	// TagsMd specifies whether to allow CBOR tags (major type 6).
	TagsMd TagsMode

	// IntDec specifies which Go integer type (int64, uint64, or [big.Int]) to use
	// when decoding CBOR int (major type 0 and 1) to Go interface{}.
	IntDec IntDecMode

	// MapKeyByteString specifies how to decode CBOR byte string as map key
	// when decoding CBOR map with byte string key into an empty interface value.
	// By default, an error is returned when attempting to decode CBOR byte string
	// as map key because Go doesn't allow []byte as map key.
	MapKeyByteString MapKeyByteStringMode

	// ExtraReturnErrors specifies extra conditions that should be treated as errors.
	ExtraReturnErrors ExtraDecErrorCond

	// DefaultMapType specifies Go map type to create and decode to
	// when unmarshaling CBOR into an empty interface value.
	// By default, unmarshal uses map[interface{}]interface{}.
	DefaultMapType reflect.Type

	// UTF8 specifies if decoder should decode CBOR Text containing invalid UTF-8.
	// By default, unmarshal rejects CBOR text containing invalid UTF-8.
	UTF8 UTF8Mode

	// FieldNameMatching specifies how string keys in CBOR maps are matched to Go struct field names.
	FieldNameMatching FieldNameMatchingMode

	// BigIntDec specifies how to decode CBOR bignum to Go interface{}.
	BigIntDec BigIntDecMode

	// DefaultByteStringType is the Go type that should be produced when decoding a CBOR byte
	// string into an empty interface value. Types to which a []byte is convertible are valid
	// for this option, except for array and pointer-to-array types. If nil, the default is
	// []byte.
	DefaultByteStringType reflect.Type

	// ByteStringToString specifies the behavior when decoding a CBOR byte string into a Go string.
	ByteStringToString ByteStringToStringMode

	// FieldNameByteString specifies the behavior when decoding a CBOR byte string map key as a
	// Go struct field name.
	FieldNameByteString FieldNameByteStringMode

	// UnrecognizedTagToAny specifies how to decode unrecognized CBOR tag into an empty interface.
	// Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet.
	UnrecognizedTagToAny UnrecognizedTagToAnyMode

	// TimeTagToAny specifies how to decode CBOR tag 0 and 1 into an empty interface (any).
	// Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format.
	TimeTagToAny TimeTagToAnyMode

	// SimpleValues is an immutable mapping from each CBOR simple value to a corresponding
	// unmarshal behavior. If nil, the simple values false, true, null, and undefined are mapped
	// to the Go analog values false, true, nil, and nil, respectively, and all other simple
	// values N (except the reserved simple values 24 through 31) are mapped to
	// cbor.SimpleValue(N). In other words, all well-formed simple values can be decoded.
	//
	// Users may provide a custom SimpleValueRegistry constructed via
	// NewSimpleValueRegistryFromDefaults.
	SimpleValues *SimpleValueRegistry

	// NaN specifies how to decode floating-point values (major type 7, additional information
	// 25 through 27) representing NaN (not-a-number).
	NaN NaNMode

	// Inf specifies how to decode floating-point values (major type 7, additional information
	// 25 through 27) representing positive or negative infinity.
	Inf InfMode

	// ByteStringToTime specifies how to decode CBOR byte string into Go time.Time.
	ByteStringToTime ByteStringToTimeMode

	// ByteStringExpectedFormat specifies how to decode CBOR byte string into Go byte slice
	// when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if
	// the CBOR byte string does not contain the expected format (e.g. base64) specified.
	// For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters"
	// in RFC 8949 Section 3.4.5.2.
	ByteStringExpectedFormat ByteStringExpectedFormatMode

	// BignumTag specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can
	// be decoded. Unlike BigIntDec, this option applies to all bignum tags encountered in a
	// CBOR input, independent of the type of the destination value of a particular Unmarshal
	// operation.
	BignumTag BignumTagMode

	// BinaryUnmarshaler specifies how to decode into types that implement
	// encoding.BinaryUnmarshaler.
	BinaryUnmarshaler BinaryUnmarshalerMode

	// TextUnmarshaler specifies how to decode into types that implement
	// encoding.TextUnmarshaler.
	TextUnmarshaler TextUnmarshalerMode

	// JSONUnmarshalerTranscoder sets the transcoding scheme used to unmarshal types that
	// implement json.Unmarshaler but do not also implement cbor.Unmarshaler. If nil, decoding
	// behavior is not influenced by whether or not a type implements json.Unmarshaler.
	JSONUnmarshalerTranscoder Transcoder
}
+
// DecMode returns DecMode with immutable options and no tags (safe for concurrency).
// It validates every option field and fails with a descriptive error on any
// out-of-range value (see decMode).
func (opts DecOptions) DecMode() (DecMode, error) { //nolint:gocritic // ignore hugeParam
	return opts.decMode()
}
+
// validForTags checks that the provided tag set is compatible with these options and returns a
// non-nil error if and only if the provided tag set is incompatible.
//
// Note: check order is significant — TagsForbidden is reported before a nil
// TagSet, and both before the expected-later-encoding conflict check.
func (opts DecOptions) validForTags(tags TagSet) error { //nolint:gocritic // ignore hugeParam
	if opts.TagsMd == TagsForbidden {
		return errors.New("cbor: cannot create DecMode with TagSet when TagsMd is TagsForbidden")
	}
	if tags == nil {
		return errors.New("cbor: cannot create DecMode with nil value as TagSet")
	}
	// Tags 21-23 are treated as built-in when either of these options is set,
	// so a user registration for any of them would be ambiguous.
	if opts.ByteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding ||
		opts.ByteStringExpectedFormat != ByteStringExpectedFormatNone {
		for _, tagNum := range []uint64{
			tagNumExpectedLaterEncodingBase64URL,
			tagNumExpectedLaterEncodingBase64,
			tagNumExpectedLaterEncodingBase16,
		} {
			if rt := tags.getTypeFromTagNum([]uint64{tagNum}); rt != nil {
				return fmt.Errorf("cbor: DecMode with non-default StringExpectedEncoding or ByteSliceExpectedEncoding treats tag %d as built-in and conflicts with the provided TagSet's registration of %v", tagNum, rt)
			}
		}

	}
	return nil
}
+
// DecModeWithTags returns DecMode with options and tags that are both immutable (safe for concurrency).
// The tag registrations are snapshotted: later mutations to tags do not affect
// the returned DecMode. Tags registered with DecTagIgnored are dropped from the
// snapshot since they never influence decoding.
func (opts DecOptions) DecModeWithTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam
	if err := opts.validForTags(tags); err != nil {
		return nil, err
	}
	dm, err := opts.decMode()
	if err != nil {
		return nil, err
	}

	// Copy tags under the set's read lock so concurrent registrations are safe.
	ts := tagSet(make(map[reflect.Type]*tagItem))
	syncTags := tags.(*syncTagSet)
	syncTags.RLock()
	for contentType, tag := range syncTags.t {
		if tag.opts.DecTag != DecTagIgnored {
			ts[contentType] = tag
		}
	}
	syncTags.RUnlock()

	if len(ts) > 0 {
		dm.tags = ts
	}

	return dm, nil
}
+
// DecModeWithSharedTags returns DecMode with immutable options and mutable shared tags (safe for concurrency).
// Unlike DecModeWithTags, the TagSet is stored by reference, so tags registered
// after this call are visible to the returned DecMode.
func (opts DecOptions) DecModeWithSharedTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam
	if err := opts.validForTags(tags); err != nil {
		return nil, err
	}
	dm, err := opts.decMode()
	if err != nil {
		return nil, err
	}
	dm.tags = tags
	return dm, nil
}
+
// Defaults and permitted ranges for the DecOptions size limits
// (MaxArrayElements, MaxMapPairs, MaxNestedLevels); enforced in decMode.
const (
	defaultMaxArrayElements = 131072
	minMaxArrayElements     = 16
	maxMaxArrayElements     = 2147483647

	defaultMaxMapPairs = 131072
	minMaxMapPairs     = 16
	maxMaxMapPairs     = 2147483647

	defaultMaxNestedLevels = 32
	minMaxNestedLevels     = 4
	maxMaxNestedLevels     = 65535
)
+
// defaultSimpleValues is the registry used when DecOptions.SimpleValues is nil.
// Built once at package init; the constructor cannot fail when given no
// customization functions, so the panic is unreachable in practice.
var defaultSimpleValues = func() *SimpleValueRegistry {
	registry, err := NewSimpleValueRegistryFromDefaults()
	if err != nil {
		panic(err)
	}
	return registry
}()
+
+//nolint:gocyclo // Each option comes with some manageable boilerplate
+func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore hugeParam
+ if !opts.DupMapKey.valid() {
+ return nil, errors.New("cbor: invalid DupMapKey " + strconv.Itoa(int(opts.DupMapKey)))
+ }
+
+ if !opts.TimeTag.valid() {
+ return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag)))
+ }
+
+ if !opts.IndefLength.valid() {
+ return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength)))
+ }
+
+ if !opts.TagsMd.valid() {
+ return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd)))
+ }
+
+ if !opts.IntDec.valid() {
+ return nil, errors.New("cbor: invalid IntDec " + strconv.Itoa(int(opts.IntDec)))
+ }
+
+ if !opts.MapKeyByteString.valid() {
+ return nil, errors.New("cbor: invalid MapKeyByteString " + strconv.Itoa(int(opts.MapKeyByteString)))
+ }
+
+ if opts.MaxNestedLevels == 0 {
+ opts.MaxNestedLevels = defaultMaxNestedLevels
+ } else if opts.MaxNestedLevels < minMaxNestedLevels || opts.MaxNestedLevels > maxMaxNestedLevels {
+ return nil, errors.New("cbor: invalid MaxNestedLevels " + strconv.Itoa(opts.MaxNestedLevels) +
+ " (range is [" + strconv.Itoa(minMaxNestedLevels) + ", " + strconv.Itoa(maxMaxNestedLevels) + "])")
+ }
+
+ if opts.MaxArrayElements == 0 {
+ opts.MaxArrayElements = defaultMaxArrayElements
+ } else if opts.MaxArrayElements < minMaxArrayElements || opts.MaxArrayElements > maxMaxArrayElements {
+ return nil, errors.New("cbor: invalid MaxArrayElements " + strconv.Itoa(opts.MaxArrayElements) +
+ " (range is [" + strconv.Itoa(minMaxArrayElements) + ", " + strconv.Itoa(maxMaxArrayElements) + "])")
+ }
+
+ if opts.MaxMapPairs == 0 {
+ opts.MaxMapPairs = defaultMaxMapPairs
+ } else if opts.MaxMapPairs < minMaxMapPairs || opts.MaxMapPairs > maxMaxMapPairs {
+ return nil, errors.New("cbor: invalid MaxMapPairs " + strconv.Itoa(opts.MaxMapPairs) +
+ " (range is [" + strconv.Itoa(minMaxMapPairs) + ", " + strconv.Itoa(maxMaxMapPairs) + "])")
+ }
+
+ if !opts.ExtraReturnErrors.valid() {
+ return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors)))
+ }
+
+ if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map {
+ return nil, fmt.Errorf("cbor: invalid DefaultMapType %s", opts.DefaultMapType)
+ }
+
+ if !opts.UTF8.valid() {
+ return nil, errors.New("cbor: invalid UTF8 " + strconv.Itoa(int(opts.UTF8)))
+ }
+
+ if !opts.FieldNameMatching.valid() {
+ return nil, errors.New("cbor: invalid FieldNameMatching " + strconv.Itoa(int(opts.FieldNameMatching)))
+ }
+
+ if !opts.BigIntDec.valid() {
+ return nil, errors.New("cbor: invalid BigIntDec " + strconv.Itoa(int(opts.BigIntDec)))
+ }
+
+ if opts.DefaultByteStringType != nil &&
+ opts.DefaultByteStringType.Kind() != reflect.String &&
+ (opts.DefaultByteStringType.Kind() != reflect.Slice || opts.DefaultByteStringType.Elem().Kind() != reflect.Uint8) {
+ return nil, fmt.Errorf("cbor: invalid DefaultByteStringType: %s is not of kind string or []uint8", opts.DefaultByteStringType)
+ }
+
+ if !opts.ByteStringToString.valid() {
+ return nil, errors.New("cbor: invalid ByteStringToString " + strconv.Itoa(int(opts.ByteStringToString)))
+ }
+
+ if !opts.FieldNameByteString.valid() {
+ return nil, errors.New("cbor: invalid FieldNameByteString " + strconv.Itoa(int(opts.FieldNameByteString)))
+ }
+
+ if !opts.UnrecognizedTagToAny.valid() {
+ return nil, errors.New("cbor: invalid UnrecognizedTagToAnyMode " + strconv.Itoa(int(opts.UnrecognizedTagToAny)))
+ }
+ simpleValues := opts.SimpleValues
+ if simpleValues == nil {
+ simpleValues = defaultSimpleValues
+ }
+
+ if !opts.TimeTagToAny.valid() {
+ return nil, errors.New("cbor: invalid TimeTagToAny " + strconv.Itoa(int(opts.TimeTagToAny)))
+ }
+
+ if !opts.NaN.valid() {
+ return nil, errors.New("cbor: invalid NaNDec " + strconv.Itoa(int(opts.NaN)))
+ }
+
+ if !opts.Inf.valid() {
+ return nil, errors.New("cbor: invalid InfDec " + strconv.Itoa(int(opts.Inf)))
+ }
+
+ if !opts.ByteStringToTime.valid() {
+ return nil, errors.New("cbor: invalid ByteStringToTime " + strconv.Itoa(int(opts.ByteStringToTime)))
+ }
+
+ if !opts.ByteStringExpectedFormat.valid() {
+ return nil, errors.New("cbor: invalid ByteStringExpectedFormat " + strconv.Itoa(int(opts.ByteStringExpectedFormat)))
+ }
+
+ if !opts.BignumTag.valid() {
+ return nil, errors.New("cbor: invalid BignumTag " + strconv.Itoa(int(opts.BignumTag)))
+ }
+
+ if !opts.BinaryUnmarshaler.valid() {
+ return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler)))
+ }
+
+ if !opts.TextUnmarshaler.valid() {
+ return nil, errors.New("cbor: invalid TextUnmarshaler " + strconv.Itoa(int(opts.TextUnmarshaler)))
+ }
+
+ dm := decMode{
+ dupMapKey: opts.DupMapKey,
+ timeTag: opts.TimeTag,
+ maxNestedLevels: opts.MaxNestedLevels,
+ maxArrayElements: opts.MaxArrayElements,
+ maxMapPairs: opts.MaxMapPairs,
+ indefLength: opts.IndefLength,
+ tagsMd: opts.TagsMd,
+ intDec: opts.IntDec,
+ mapKeyByteString: opts.MapKeyByteString,
+ extraReturnErrors: opts.ExtraReturnErrors,
+ defaultMapType: opts.DefaultMapType,
+ utf8: opts.UTF8,
+ fieldNameMatching: opts.FieldNameMatching,
+ bigIntDec: opts.BigIntDec,
+ defaultByteStringType: opts.DefaultByteStringType,
+ byteStringToString: opts.ByteStringToString,
+ fieldNameByteString: opts.FieldNameByteString,
+ unrecognizedTagToAny: opts.UnrecognizedTagToAny,
+ timeTagToAny: opts.TimeTagToAny,
+ simpleValues: simpleValues,
+ nanDec: opts.NaN,
+ infDec: opts.Inf,
+ byteStringToTime: opts.ByteStringToTime,
+ byteStringExpectedFormat: opts.ByteStringExpectedFormat,
+ bignumTag: opts.BignumTag,
+ binaryUnmarshaler: opts.BinaryUnmarshaler,
+ textUnmarshaler: opts.TextUnmarshaler,
+ jsonUnmarshalerTranscoder: opts.JSONUnmarshalerTranscoder,
+ }
+
+ return &dm, nil
+}
+
+// DecMode is the main interface for CBOR decoding.
+// A DecMode is built from DecOptions and applies those options to every
+// decode operation performed through it.
+type DecMode interface {
+	// Unmarshal parses the CBOR-encoded data into the value pointed to by v
+	// using the decoding mode. If v is nil, not a pointer, or a nil pointer,
+	// Unmarshal returns an error.
+	//
+	// See the documentation for Unmarshal for details.
+	Unmarshal(data []byte, v any) error
+
+	// UnmarshalFirst parses the first CBOR data item into the value pointed to by v
+	// using the decoding mode. Any remaining bytes are returned in rest.
+	//
+	// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
+	//
+	// See the documentation for Unmarshal for details.
+	UnmarshalFirst(data []byte, v any) (rest []byte, err error)
+
+	// Valid checks whether data is a well-formed encoded CBOR data item and
+	// that it complies with configurable restrictions such as MaxNestedLevels,
+	// MaxArrayElements, MaxMapPairs, etc.
+	//
+	// If there are any remaining bytes after the CBOR data item,
+	// an ExtraneousDataError is returned.
+	//
+	// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity)
+	// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed".
+	//
+	// Deprecated: Valid is kept for compatibility and should not be used.
+	// Use Wellformed instead because it has a more appropriate name.
+	Valid(data []byte) error
+
+	// Wellformed checks whether data is a well-formed encoded CBOR data item and
+	// that it complies with configurable restrictions such as MaxNestedLevels,
+	// MaxArrayElements, MaxMapPairs, etc.
+	//
+	// If there are any remaining bytes after the CBOR data item,
+	// an ExtraneousDataError is returned.
+	Wellformed(data []byte) error
+
+	// NewDecoder returns a new decoder that reads from r using dm DecMode.
+	NewDecoder(r io.Reader) *Decoder
+
+	// DecOptions returns user specified options used to create this DecMode.
+	DecOptions() DecOptions
+}
+
+// decMode is the concrete implementation of DecMode. Its fields mirror the
+// validated DecOptions it was built from (see decMode()/DecOptions()), so a
+// decMode never carries an invalid setting.
+type decMode struct {
+	tags                      tagProvider // registered tag number -> Go type mappings; nil when none are registered — TODO confirm against tag registration code
+	dupMapKey                 DupMapKeyMode
+	timeTag                   DecTagMode
+	maxNestedLevels           int
+	maxArrayElements          int
+	maxMapPairs               int
+	indefLength               IndefLengthMode
+	tagsMd                    TagsMode
+	intDec                    IntDecMode
+	mapKeyByteString          MapKeyByteStringMode
+	extraReturnErrors         ExtraDecErrorCond
+	defaultMapType            reflect.Type
+	utf8                      UTF8Mode
+	fieldNameMatching         FieldNameMatchingMode
+	bigIntDec                 BigIntDecMode
+	defaultByteStringType     reflect.Type
+	byteStringToString        ByteStringToStringMode
+	fieldNameByteString       FieldNameByteStringMode
+	unrecognizedTagToAny      UnrecognizedTagToAnyMode
+	timeTagToAny              TimeTagToAnyMode
+	simpleValues              *SimpleValueRegistry // never nil: decMode() substitutes defaultSimpleValues for a nil option
+	nanDec                    NaNMode
+	infDec                    InfMode
+	byteStringToTime          ByteStringToTimeMode
+	byteStringExpectedFormat  ByteStringExpectedFormatMode
+	bignumTag                 BignumTagMode
+	binaryUnmarshaler         BinaryUnmarshalerMode
+	textUnmarshaler           TextUnmarshalerMode
+	jsonUnmarshalerTranscoder Transcoder
+}
+
+// defaultDecMode is the decMode built from zero-value DecOptions; the error
+// is ignored because the zero-value options are expected to always validate.
+var defaultDecMode, _ = DecOptions{}.decMode()
+
+// DecOptions returns user specified options used to create this DecMode.
+func (dm *decMode) DecOptions() DecOptions {
+ simpleValues := dm.simpleValues
+ if simpleValues == defaultSimpleValues {
+ // Users can't explicitly set this to defaultSimpleValues. It must have been nil in
+ // the original DecOptions.
+ simpleValues = nil
+ }
+
+ return DecOptions{
+ DupMapKey: dm.dupMapKey,
+ TimeTag: dm.timeTag,
+ MaxNestedLevels: dm.maxNestedLevels,
+ MaxArrayElements: dm.maxArrayElements,
+ MaxMapPairs: dm.maxMapPairs,
+ IndefLength: dm.indefLength,
+ TagsMd: dm.tagsMd,
+ IntDec: dm.intDec,
+ MapKeyByteString: dm.mapKeyByteString,
+ ExtraReturnErrors: dm.extraReturnErrors,
+ DefaultMapType: dm.defaultMapType,
+ UTF8: dm.utf8,
+ FieldNameMatching: dm.fieldNameMatching,
+ BigIntDec: dm.bigIntDec,
+ DefaultByteStringType: dm.defaultByteStringType,
+ ByteStringToString: dm.byteStringToString,
+ FieldNameByteString: dm.fieldNameByteString,
+ UnrecognizedTagToAny: dm.unrecognizedTagToAny,
+ TimeTagToAny: dm.timeTagToAny,
+ SimpleValues: simpleValues,
+ NaN: dm.nanDec,
+ Inf: dm.infDec,
+ ByteStringToTime: dm.byteStringToTime,
+ ByteStringExpectedFormat: dm.byteStringExpectedFormat,
+ BignumTag: dm.bignumTag,
+ BinaryUnmarshaler: dm.binaryUnmarshaler,
+ TextUnmarshaler: dm.textUnmarshaler,
+ JSONUnmarshalerTranscoder: dm.jsonUnmarshalerTranscoder,
+ }
+}
+
+// Unmarshal parses the CBOR-encoded data into the value pointed to by v
+// using dm decoding mode. If v is nil, not a pointer, or a nil pointer,
+// Unmarshal returns an error.
+//
+// See the documentation for Unmarshal for details.
+func (dm *decMode) Unmarshal(data []byte, v any) error {
+	d := decoder{data: data, dm: dm}
+
+	// Require exactly one well-formed data item (no trailing bytes) before
+	// decoding. The well-formedness scan advances the read offset, so save
+	// and restore it around the check.
+	savedOff := d.off
+	wfErr := d.wellformed(false, false)
+	d.off = savedOff
+	if wfErr != nil {
+		return wfErr
+	}
+
+	return d.value(v)
+}
+
+// UnmarshalFirst parses the first CBOR data item into the value pointed to by v
+// using dm decoding mode. Any remaining bytes are returned in rest.
+//
+// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
+//
+// See the documentation for Unmarshal for details.
+func (dm *decMode) UnmarshalFirst(data []byte, v any) (rest []byte, err error) {
+	d := decoder{data: data, dm: dm}
+
+	// Verify the first data item is well-formed (trailing bytes are allowed
+	// here), restoring the read offset consumed by the scan.
+	start := d.off
+	err = d.wellformed(true, false)
+	d.off = start
+
+	// Only decode when the well-formedness check passed; structured this way
+	// for better test coverage.
+	if err == nil {
+		err = d.value(v)
+	}
+	if err != nil {
+		// On any failure, suppress the rest bytes entirely.
+		return nil, err
+	}
+
+	// Everything after the consumed data item (possibly empty) is rest.
+	return d.data[d.off:], nil
+}
+
+// Valid checks whether data is a well-formed encoded CBOR data item and
+// that it complies with configurable restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+//
+// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity)
+// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed".
+//
+// Deprecated: Valid is kept for compatibility and should not be used.
+// Use Wellformed instead because it has a more appropriate name.
+func (dm *decMode) Valid(data []byte) error {
+	// Valid is only a legacy alias; all logic lives in Wellformed.
+	return dm.Wellformed(data)
+}
+
+// Wellformed checks whether data is a well-formed encoded CBOR data item and
+// that it complies with configurable restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+func (dm *decMode) Wellformed(data []byte) error {
+ d := decoder{data: data, dm: dm}
+ return d.wellformed(false, false)
+}
+
+// NewDecoder returns a new decoder that reads from r using dm DecMode.
+func (dm *decMode) NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		r: r,
+		d: decoder{dm: dm},
+	}
+}
+
+// decoder reads CBOR data items from data, tracking the next read position
+// in off; dm supplies the decoding options in effect.
+type decoder struct {
+	data []byte
+	off  int      // next read offset in data
+	dm   *decMode // decoding mode the decoder was created with
+
+	// expectedLaterEncodingTags stores a stack of encountered "Expected Later Encoding" tags,
+	// if any.
+	//
+	// The "Expected Later Encoding" tags (21 to 23) are valid for any data item. When decoding
+	// byte strings, the effective encoding comes from the tag nearest to the byte string being
+	// decoded. For example, the effective encoding of the byte string 21(22(h'41')) would be
+	// controlled by tag 22,and in the data item 23(h'42', 22([21(h'43')])]) the effective
+	// encoding of the byte strings h'42' and h'43' would be controlled by tag 23 and 21,
+	// respectively.
+	expectedLaterEncodingTags []uint64
+}
+
+// value decodes CBOR data item into the value pointed to by v.
+// If CBOR data item fails to be decoded into v,
+// error is returned and offset is moved to the next CBOR data item.
+// Precondition: d.data contains at least one well-formed CBOR data item.
+func (d *decoder) value(v any) error {
+	// The destination must be a non-nil pointer.
+	if v == nil {
+		return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"}
+	}
+	rv := reflect.ValueOf(v)
+	switch {
+	case rv.Kind() != reflect.Pointer:
+		return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"}
+	case rv.IsNil():
+		return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"}
+	}
+	elem := rv.Elem()
+	return d.parseToValue(elem, getTypeInfo(elem.Type()))
+}
+
+// parseToValue decodes CBOR data to value. It assumes data is well-formed,
+// and does not perform bounds checking.
+//
+// Offset discipline: branches that only inspect upcoming data items save
+// d.off before calling getHead and restore it afterwards, so decoding always
+// resumes at the item to be consumed. On errors, d.skip() advances past the
+// offending item so the caller can continue with the next one.
+func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
+
+	// Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil.
+	if d.nextCBORNil() && v.Kind() == reflect.Pointer {
+		d.skip()
+		v.SetZero()
+		return nil
+	}
+
+	if tInfo.spclType == specialTypeIface {
+		if !v.IsNil() {
+			// Use value type
+			v = v.Elem()
+			tInfo = getTypeInfo(v.Type())
+		} else { //nolint:gocritic
+			// Create and use registered type if CBOR data is registered tag
+			if d.dm.tags != nil && d.nextCBORType() == cborTypeTag {
+
+				// Collect the full chain of tag numbers without consuming them.
+				off := d.off
+				var tagNums []uint64
+				for d.nextCBORType() == cborTypeTag {
+					_, _, tagNum := d.getHead()
+					tagNums = append(tagNums, tagNum)
+				}
+				d.off = off
+
+				registeredType := d.dm.tags.getTypeFromTagNum(tagNums)
+				if registeredType != nil {
+					if registeredType.Implements(tInfo.nonPtrType) ||
+						reflect.PointerTo(registeredType).Implements(tInfo.nonPtrType) {
+						v.Set(reflect.New(registeredType))
+						v = v.Elem()
+						tInfo = getTypeInfo(registeredType)
+					}
+				}
+			}
+		}
+	}
+
+	// Create new value for the pointer v to point to.
+	// At this point, CBOR value is not nil/undefined if v is a pointer.
+	for v.Kind() == reflect.Pointer {
+		if v.IsNil() {
+			if !v.CanSet() {
+				d.skip()
+				return errors.New("cbor: cannot set new value for " + v.Type().String())
+			}
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		v = v.Elem()
+	}
+
+	// Strip self-described CBOR tag number.
+	for d.nextCBORType() == cborTypeTag {
+		off := d.off
+		_, _, tagNum := d.getHead()
+		if tagNum != tagNumSelfDescribedCBOR {
+			d.off = off
+			break
+		}
+	}
+
+	// Check validity of supported built-in tags.
+	// The scan consumes tag heads, so the offset is restored afterwards.
+	off := d.off
+	for d.nextCBORType() == cborTypeTag {
+		_, _, tagNum := d.getHead()
+		if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil {
+			d.skip()
+			return err
+		}
+	}
+	d.off = off
+
+	if tInfo.spclType != specialTypeNone {
+		switch tInfo.spclType {
+		case specialTypeEmptyIface:
+			iv, err := d.parse(false) // Skipped self-described CBOR tag number already.
+			if iv != nil {
+				v.Set(reflect.ValueOf(iv))
+			}
+			return err
+
+		case specialTypeTag:
+			return d.parseToTag(v)
+
+		case specialTypeTime:
+			if d.nextCBORNil() {
+				// Decoding CBOR null and undefined to time.Time is no-op.
+				d.skip()
+				return nil
+			}
+			tm, ok, err := d.parseToTime()
+			if err != nil {
+				return err
+			}
+			if ok {
+				v.Set(reflect.ValueOf(tm))
+			}
+			return nil
+
+		case specialTypeUnmarshalerIface:
+			return d.parseToUnmarshaler(v)
+
+		case specialTypeUnexportedUnmarshalerIface:
+			return d.parseToUnexportedUnmarshaler(v)
+
+		case specialTypeJSONUnmarshalerIface:
+			// This special type implies that the type does not also implement
+			// cbor.Unmarshaler.
+			if d.dm.jsonUnmarshalerTranscoder == nil {
+				break
+			}
+			return d.parseToJSONUnmarshaler(v)
+		}
+	}
+
+	// Check registered tag number
+	if tagItem := d.getRegisteredTagItem(tInfo.nonPtrType); tagItem != nil {
+		t := d.nextCBORType()
+		if t != cborTypeTag {
+			if tagItem.opts.DecTag == DecTagRequired {
+				d.skip() // Required tag number is absent, skip entire tag
+				return &UnmarshalTypeError{
+					CBORType: t.String(),
+					GoType:   tInfo.typ.String(),
+					errorMsg: "expect CBOR tag value"}
+			}
+		} else if err := d.validRegisteredTagNums(tagItem); err != nil {
+			d.skip() // Skip tag content
+			return err
+		}
+	}
+
+	t := d.nextCBORType()
+
+	switch t {
+	case cborTypePositiveInt:
+		_, _, val := d.getHead()
+		return fillPositiveInt(t, val, v)
+
+	case cborTypeNegativeInt:
+		_, _, val := d.getHead()
+		if val > math.MaxInt64 {
+			// CBOR negative integer overflows int64, use big.Int to store value.
+			bi := new(big.Int)
+			bi.SetUint64(val)
+			bi.Add(bi, big.NewInt(1))
+			bi.Neg(bi)
+
+			if tInfo.nonPtrType == typeBigInt {
+				v.Set(reflect.ValueOf(*bi))
+				return nil
+			}
+			return &UnmarshalTypeError{
+				CBORType: t.String(),
+				GoType:   tInfo.nonPtrType.String(),
+				errorMsg: bi.String() + " overflows Go's int64",
+			}
+		}
+		// -1 - val in two's complement (CBOR negative integers encode -(val+1)).
+		nValue := int64(-1) ^ int64(val)
+		return fillNegativeInt(t, nValue, v)
+
+	case cborTypeByteString:
+		b, copied := d.parseByteString()
+		b, converted, err := d.applyByteStringTextConversion(b, v.Type())
+		if err != nil {
+			return err
+		}
+		copied = copied || converted
+		return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
+
+	case cborTypeTextString:
+		b, err := d.parseTextString()
+		if err != nil {
+			return err
+		}
+		return fillTextString(t, b, v, d.dm.textUnmarshaler)
+
+	case cborTypePrimitives:
+		_, ai, val := d.getHead()
+		switch ai {
+		case additionalInformationAsFloat16:
+			f := float64(float16.Frombits(uint16(val)).Float32())
+			return fillFloat(t, f, v)
+
+		case additionalInformationAsFloat32:
+			f := float64(math.Float32frombits(uint32(val)))
+			return fillFloat(t, f, v)
+
+		case additionalInformationAsFloat64:
+			f := math.Float64frombits(val)
+			return fillFloat(t, f, v)
+
+		default: // ai <= 24
+			if d.dm.simpleValues.rejected[SimpleValue(val)] {
+				return &UnacceptableDataItemError{
+					CBORType: t.String(),
+					Message:  "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized",
+				}
+			}
+
+			switch ai {
+			case additionalInformationAsFalse,
+				additionalInformationAsTrue:
+				return fillBool(t, ai == additionalInformationAsTrue, v)
+
+			case additionalInformationAsNull,
+				additionalInformationAsUndefined:
+				return fillNil(t, v)
+
+			default:
+				return fillPositiveInt(t, val, v)
+			}
+		}
+
+	case cborTypeTag:
+		_, _, tagNum := d.getHead()
+		switch tagNum {
+		case tagNumUnsignedBignum:
+			// Bignum (tag 2) can be decoded to uint, int, float, slice, array, or big.Int.
+			b, copied := d.parseByteString()
+			bi := new(big.Int).SetBytes(b)
+
+			if tInfo.nonPtrType == typeBigInt {
+				v.Set(reflect.ValueOf(*bi))
+				return nil
+			}
+			if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array {
+				return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
+			}
+			if bi.IsUint64() {
+				return fillPositiveInt(t, bi.Uint64(), v)
+			}
+			return &UnmarshalTypeError{
+				CBORType: t.String(),
+				GoType:   tInfo.nonPtrType.String(),
+				errorMsg: bi.String() + " overflows " + v.Type().String(),
+			}
+
+		case tagNumNegativeBignum:
+			// Bignum (tag 3) can be decoded to int, float, slice, array, or big.Int.
+			b, copied := d.parseByteString()
+			bi := new(big.Int).SetBytes(b)
+			bi.Add(bi, big.NewInt(1))
+			bi.Neg(bi)
+
+			if tInfo.nonPtrType == typeBigInt {
+				v.Set(reflect.ValueOf(*bi))
+				return nil
+			}
+			if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array {
+				return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
+			}
+			if bi.IsInt64() {
+				return fillNegativeInt(t, bi.Int64(), v)
+			}
+			return &UnmarshalTypeError{
+				CBORType: t.String(),
+				GoType:   tInfo.nonPtrType.String(),
+				errorMsg: bi.String() + " overflows " + v.Type().String(),
+			}
+
+		case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16:
+			// If conversion for interoperability with text encodings is not configured,
+			// treat tags 21-23 as unregistered tags.
+			if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone {
+				// Push onto the stack for the duration of the tag content decode.
+				d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum)
+				defer func() {
+					d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1]
+				}()
+			}
+		}
+
+		return d.parseToValue(v, tInfo)
+
+	case cborTypeArray:
+		if tInfo.nonPtrKind == reflect.Slice {
+			return d.parseArrayToSlice(v, tInfo)
+		} else if tInfo.nonPtrKind == reflect.Array {
+			return d.parseArrayToArray(v, tInfo)
+		} else if tInfo.nonPtrKind == reflect.Struct {
+			return d.parseArrayToStruct(v, tInfo)
+		}
+		d.skip()
+		return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()}
+
+	case cborTypeMap:
+		if tInfo.nonPtrKind == reflect.Struct {
+			return d.parseMapToStruct(v, tInfo)
+		} else if tInfo.nonPtrKind == reflect.Map {
+			return d.parseMapToMap(v, tInfo)
+		}
+		d.skip()
+		return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()}
+	}
+
+	return nil
+}
+
+// parseToTag decodes a CBOR tag data item into a cbor.Tag value. A CBOR
+// null or undefined leaves the destination untouched; a non-tag item is an
+// UnmarshalTypeError.
+func (d *decoder) parseToTag(v reflect.Value) error {
+	if d.nextCBORNil() {
+		// Decoding CBOR null and undefined to cbor.Tag is no-op.
+		d.skip()
+		return nil
+	}
+
+	if t := d.nextCBORType(); t != cborTypeTag {
+		d.skip()
+		return &UnmarshalTypeError{CBORType: t.String(), GoType: typeTag.String()}
+	}
+
+	// Read the tag number, then decode its content into the default Go type.
+	_, _, num := d.getHead()
+	content, err := d.parse(false)
+	if err != nil {
+		return err
+	}
+
+	v.Set(reflect.ValueOf(Tag{num, content}))
+	return nil
+}
+
+// parseToTime decodes the current data item as a time.Time. The bool return value is false if and
+// only if the destination value should remain unmodified.
+//
+// Accepted encodings: tag 0 (RFC3339 text), tag 1 (epoch integer or float),
+// the same representations untagged (subject to d.dm.timeTag), and a byte
+// string holding RFC3339 text when ByteStringToTimeAllowed is set.
+func (d *decoder) parseToTime() (time.Time, bool, error) {
+	// Verify that tag number or absence of tag number is acceptable to specified timeTag.
+	if t := d.nextCBORType(); t == cborTypeTag {
+		if d.dm.timeTag == DecTagIgnored {
+			// Skip all enclosing tags
+			for t == cborTypeTag {
+				d.getHead()
+				t = d.nextCBORType()
+			}
+			if d.nextCBORNil() {
+				d.skip()
+				return time.Time{}, false, nil
+			}
+		} else {
+			// Read tag number
+			_, _, tagNum := d.getHead()
+			if tagNum != 0 && tagNum != 1 {
+				d.skip() // skip tag content
+				return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1")
+			}
+		}
+	} else {
+		if d.dm.timeTag == DecTagRequired {
+			d.skip()
+			return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String(), errorMsg: "expect CBOR tag value"}
+		}
+	}
+
+	switch t := d.nextCBORType(); t {
+	case cborTypeByteString:
+		if d.dm.byteStringToTime == ByteStringToTimeAllowed {
+			b, _ := d.parseByteString()
+			t, err := time.Parse(time.RFC3339, string(b))
+			if err != nil {
+				return time.Time{}, false, fmt.Errorf("cbor: cannot set %q for time.Time: %w", string(b), err)
+			}
+			return t, true, nil
+		}
+		return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()}
+
+	case cborTypeTextString:
+		s, err := d.parseTextString()
+		if err != nil {
+			return time.Time{}, false, err
+		}
+		t, err := time.Parse(time.RFC3339, string(s))
+		if err != nil {
+			return time.Time{}, false, errors.New("cbor: cannot set " + string(s) + " for time.Time: " + err.Error())
+		}
+		return t, true, nil
+
+	case cborTypePositiveInt:
+		// Epoch seconds; reject values that do not fit int64.
+		_, _, val := d.getHead()
+		if val > math.MaxInt64 {
+			return time.Time{}, false, &UnmarshalTypeError{
+				CBORType: t.String(),
+				GoType:   typeTime.String(),
+				errorMsg: fmt.Sprintf("%d overflows Go's int64", val),
+			}
+		}
+		return time.Unix(int64(val), 0), true, nil
+
+	case cborTypeNegativeInt:
+		// Negative epoch seconds; CBOR encodes -(val+1), so overflow needs care.
+		_, _, val := d.getHead()
+		if val > math.MaxInt64 {
+			if val == math.MaxUint64 {
+				// Maximum absolute value representable by negative integer is 2^64,
+				// not 2^64-1, so it overflows uint64.
+				return time.Time{}, false, &UnmarshalTypeError{
+					CBORType: t.String(),
+					GoType:   typeTime.String(),
+					errorMsg: "-18446744073709551616 overflows Go's int64",
+				}
+			}
+			return time.Time{}, false, &UnmarshalTypeError{
+				CBORType: t.String(),
+				GoType:   typeTime.String(),
+				errorMsg: fmt.Sprintf("-%d overflows Go's int64", val+1),
+			}
+		}
+		return time.Unix(int64(-1)^int64(val), 0), true, nil
+
+	case cborTypePrimitives:
+		// Epoch seconds as a float of any width.
+		_, ai, val := d.getHead()
+		var f float64
+		switch ai {
+		case additionalInformationAsFloat16:
+			f = float64(float16.Frombits(uint16(val)).Float32())
+
+		case additionalInformationAsFloat32:
+			f = float64(math.Float32frombits(uint32(val)))
+
+		case additionalInformationAsFloat64:
+			f = math.Float64frombits(val)
+
+		default:
+			return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()}
+		}
+
+		if math.IsNaN(f) || math.IsInf(f, 0) {
+			// https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.2-6
+			return time.Time{}, true, nil
+		}
+		seconds, fractional := math.Modf(f)
+		return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil
+
+	default:
+		return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()}
+	}
+}
+
+// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface.
+// It assumes data is well-formed, and does not perform bounds checking.
+func (d *decoder) parseToUnmarshaler(v reflect.Value) error {
+	// CBOR null/undefined into a nil pointer is a no-op.
+	if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
+		d.skip()
+		return nil
+	}
+
+	if v.Kind() != reflect.Pointer && v.CanAddr() {
+		v = v.Addr()
+	}
+	u, ok := v.Interface().(Unmarshaler)
+	if !ok {
+		d.skip()
+		return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler")
+	}
+	// Skip over the data item to find its extent, then hand the raw bytes
+	// to the value's UnmarshalCBOR.
+	start := d.off
+	d.skip()
+	return u.UnmarshalCBOR(d.data[start:d.off])
+}
+
+// parseToUnexportedUnmarshaler parses CBOR data to value implementing unmarshaler interface.
+// It assumes data is well-formed, and does not perform bounds checking.
+func (d *decoder) parseToUnexportedUnmarshaler(v reflect.Value) error {
+	// CBOR null/undefined into a nil pointer is a no-op.
+	if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
+		d.skip()
+		return nil
+	}
+
+	if v.Kind() != reflect.Pointer && v.CanAddr() {
+		v = v.Addr()
+	}
+	u, ok := v.Interface().(unmarshaler)
+	if !ok {
+		d.skip()
+		return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.unmarshaler")
+	}
+	// Skip over the data item to find its extent, then hand the raw bytes
+	// to the value's unmarshalCBOR.
+	start := d.off
+	d.skip()
+	return u.unmarshalCBOR(d.data[start:d.off])
+}
+
+// parseToJSONUnmarshaler parses CBOR data to be transcoded to JSON and passed to the value's
+// implementation of the json.Unmarshaler interface. It assumes data is well-formed, and does not
+// perform bounds checking.
+func (d *decoder) parseToJSONUnmarshaler(v reflect.Value) error {
+	// CBOR null/undefined into a nil pointer is a no-op.
+	if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
+		d.skip()
+		return nil
+	}
+
+	if v.Kind() != reflect.Pointer && v.CanAddr() {
+		v = v.Addr()
+	}
+	u, ok := v.Interface().(jsonUnmarshaler)
+	if !ok {
+		d.skip()
+		return errors.New("cbor: failed to assert " + v.Type().String() + " as json.Unmarshaler")
+	}
+
+	// Determine the raw extent of the data item, transcode it to JSON in a
+	// pooled buffer, and feed the JSON to the value's UnmarshalJSON.
+	start := d.off
+	d.skip()
+	buf := getEncodeBuffer()
+	defer putEncodeBuffer(buf)
+	if err := d.dm.jsonUnmarshalerTranscoder.Transcode(buf, bytes.NewReader(d.data[start:d.off])); err != nil {
+		return &TranscodeError{err: err, rtype: v.Type(), sourceFormat: "cbor", targetFormat: "json"}
+	}
+	return u.UnmarshalJSON(buf.Bytes())
+}
+
+// parse parses CBOR data and returns value in default Go type.
+// It assumes data is well-formed, and does not perform bounds checking.
+//
+// Integer, byte-string, and tag results are shaped by the mode's IntDec,
+// BigIntDec, DefaultByteStringType, TimeTagToAny, and UnrecognizedTagToAny
+// options; composite items are delegated to parseArray/parseMap.
+func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyclo
+	// Strip self-described CBOR tag number.
+	if skipSelfDescribedTag {
+		for d.nextCBORType() == cborTypeTag {
+			off := d.off
+			_, _, tagNum := d.getHead()
+			if tagNum != tagNumSelfDescribedCBOR {
+				d.off = off
+				break
+			}
+		}
+	}
+
+	// Check validity of supported built-in tags.
+	// The scan consumes tag heads, so the offset is restored afterwards.
+	off := d.off
+	for d.nextCBORType() == cborTypeTag {
+		_, _, tagNum := d.getHead()
+		if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil {
+			d.skip()
+			return nil, err
+		}
+	}
+	d.off = off
+
+	t := d.nextCBORType()
+	switch t {
+	case cborTypePositiveInt:
+		_, _, val := d.getHead()
+
+		switch d.dm.intDec {
+		case IntDecConvertNone:
+			return val, nil
+
+		case IntDecConvertSigned, IntDecConvertSignedOrFail:
+			if val > math.MaxInt64 {
+				return nil, &UnmarshalTypeError{
+					CBORType: t.String(),
+					GoType:   reflect.TypeOf(int64(0)).String(),
+					errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64",
+				}
+			}
+
+			return int64(val), nil
+
+		case IntDecConvertSignedOrBigInt:
+			if val > math.MaxInt64 {
+				bi := new(big.Int).SetUint64(val)
+				if d.dm.bigIntDec == BigIntDecodePointer {
+					return bi, nil
+				}
+				return *bi, nil
+			}
+
+			return int64(val), nil
+
+		default:
+			// not reachable
+		}
+
+	case cborTypeNegativeInt:
+		_, _, val := d.getHead()
+
+		if val > math.MaxInt64 {
+			// CBOR negative integer value overflows Go int64, use big.Int instead.
+			bi := new(big.Int).SetUint64(val)
+			bi.Add(bi, big.NewInt(1))
+			bi.Neg(bi)
+
+			if d.dm.intDec == IntDecConvertSignedOrFail {
+				return nil, &UnmarshalTypeError{
+					CBORType: t.String(),
+					GoType:   reflect.TypeOf(int64(0)).String(),
+					errorMsg: bi.String() + " overflows Go's int64",
+				}
+			}
+
+			if d.dm.bigIntDec == BigIntDecodePointer {
+				return bi, nil
+			}
+			return *bi, nil
+		}
+
+		// -1 - val in two's complement (CBOR negative integers encode -(val+1)).
+		nValue := int64(-1) ^ int64(val)
+		return nValue, nil
+
+	case cborTypeByteString:
+		b, copied := d.parseByteString()
+		var effectiveByteStringType = d.dm.defaultByteStringType
+		if effectiveByteStringType == nil {
+			effectiveByteStringType = typeByteSlice
+		}
+		b, converted, err := d.applyByteStringTextConversion(b, effectiveByteStringType)
+		if err != nil {
+			return nil, err
+		}
+		copied = copied || converted
+
+		switch effectiveByteStringType {
+		case typeByteSlice:
+			if copied {
+				return b, nil
+			}
+			// b still aliases the input; clone before returning.
+			clone := make([]byte, len(b))
+			copy(clone, b)
+			return clone, nil
+
+		case typeString:
+			return string(b), nil
+
+		default:
+			if copied || d.dm.defaultByteStringType.Kind() == reflect.String {
+				// Avoid an unnecessary copy since the conversion to string must
+				// copy the underlying bytes.
+				return reflect.ValueOf(b).Convert(d.dm.defaultByteStringType).Interface(), nil
+			}
+			clone := make([]byte, len(b))
+			copy(clone, b)
+			return reflect.ValueOf(clone).Convert(d.dm.defaultByteStringType).Interface(), nil
+		}
+
+	case cborTypeTextString:
+		b, err := d.parseTextString()
+		if err != nil {
+			return nil, err
+		}
+		return string(b), nil
+
+	case cborTypeTag:
+		tagOff := d.off
+		_, _, tagNum := d.getHead()
+		contentOff := d.off
+
+		switch tagNum {
+		case tagNumRFC3339Time, tagNumEpochTime:
+			// Re-decode from the tag head so parseToTime sees the tag number.
+			d.off = tagOff
+			tm, _, err := d.parseToTime()
+			if err != nil {
+				return nil, err
+			}
+
+			switch d.dm.timeTagToAny {
+			case TimeTagToTime:
+				return tm, nil
+
+			case TimeTagToRFC3339:
+				if tagNum == 1 {
+					tm = tm.UTC()
+				}
+				// Call time.MarshalText() to format decoded time to RFC3339 format,
+				// and return error on time value that cannot be represented in
+				// RFC3339 format. E.g. year cannot exceed 9999, etc.
+				text, err := tm.Truncate(time.Second).MarshalText()
+				if err != nil {
+					return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format: %v", err)
+				}
+				return string(text), nil
+
+			case TimeTagToRFC3339Nano:
+				if tagNum == 1 {
+					tm = tm.UTC()
+				}
+				// Call time.MarshalText() to format decoded time to RFC3339 format,
+				// and return error on time value that cannot be represented in
+				// RFC3339 format with sub-second precision.
+				text, err := tm.MarshalText()
+				if err != nil {
+					return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format with sub-second precision: %v", err)
+				}
+				return string(text), nil
+
+			default:
+				// not reachable
+			}
+
+		case tagNumUnsignedBignum:
+			b, _ := d.parseByteString()
+			bi := new(big.Int).SetBytes(b)
+
+			if d.dm.bigIntDec == BigIntDecodePointer {
+				return bi, nil
+			}
+			return *bi, nil
+
+		case tagNumNegativeBignum:
+			b, _ := d.parseByteString()
+			bi := new(big.Int).SetBytes(b)
+			bi.Add(bi, big.NewInt(1))
+			bi.Neg(bi)
+
+			if d.dm.bigIntDec == BigIntDecodePointer {
+				return bi, nil
+			}
+			return *bi, nil
+
+		case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16:
+			// If conversion for interoperability with text encodings is not configured,
+			// treat tags 21-23 as unregistered tags.
+			if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding ||
+				d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone {
+				// Push onto the stack for the duration of the tag content decode.
+				d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum)
+				defer func() {
+					d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1]
+				}()
+				return d.parse(false)
+			}
+		}
+
+		if d.dm.tags != nil {
+			// Parse to specified type if tag number is registered.
+			tagNums := []uint64{tagNum}
+			for d.nextCBORType() == cborTypeTag {
+				_, _, num := d.getHead()
+				tagNums = append(tagNums, num)
+			}
+			registeredType := d.dm.tags.getTypeFromTagNum(tagNums)
+			if registeredType != nil {
+				d.off = tagOff
+				rv := reflect.New(registeredType)
+				if err := d.parseToValue(rv.Elem(), getTypeInfo(registeredType)); err != nil {
+					return nil, err
+				}
+				return rv.Elem().Interface(), nil
+			}
+		}
+
+		// Parse tag content
+		d.off = contentOff
+		content, err := d.parse(false)
+		if err != nil {
+			return nil, err
+		}
+		if d.dm.unrecognizedTagToAny == UnrecognizedTagContentToAny {
+			return content, nil
+		}
+		return Tag{tagNum, content}, nil
+
+	case cborTypePrimitives:
+		_, ai, val := d.getHead()
+		if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] {
+			return nil, &UnacceptableDataItemError{
+				CBORType: t.String(),
+				Message:  "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized",
+			}
+		}
+		if ai < 20 || ai == 24 {
+			return SimpleValue(val), nil
+		}
+
+		switch ai {
+		case additionalInformationAsFalse,
+			additionalInformationAsTrue:
+			return (ai == additionalInformationAsTrue), nil
+
+		case additionalInformationAsNull,
+			additionalInformationAsUndefined:
+			return nil, nil
+
+		case additionalInformationAsFloat16:
+			f := float64(float16.Frombits(uint16(val)).Float32())
+			return f, nil
+
+		case additionalInformationAsFloat32:
+			f := float64(math.Float32frombits(uint32(val)))
+			return f, nil
+
+		case additionalInformationAsFloat64:
+			f := math.Float64frombits(val)
+			return f, nil
+		}
+
+	case cborTypeArray:
+		return d.parseArray()
+
+	case cborTypeMap:
+		if d.dm.defaultMapType != nil {
+			m := reflect.New(d.dm.defaultMapType)
+			err := d.parseToValue(m, getTypeInfo(m.Elem().Type()))
+			if err != nil {
+				return nil, err
+			}
+			return m.Elem().Interface(), nil
+		}
+		return d.parseMap()
+	}
+
+	return nil, nil
+}
+
+// parseByteString parses a CBOR encoded byte string. The returned byte slice
+// may be backed directly by the input. The second return value will be true if
+// and only if the slice is backed by a copy of the input. Callers are
+// responsible for making a copy if necessary.
+func (d *decoder) parseByteString() ([]byte, bool) {
+	_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+	if !indefiniteLength {
+		// Definite length: return a zero-copy view into the input buffer.
+		b := d.data[d.off : d.off+int(val)]
+		d.off += int(val)
+		return b, false
+	}
+	// Process indefinite length string chunks.
+	// Chunks are concatenated into a freshly allocated slice, so the
+	// result does not alias the input buffer.
+	b := []byte{}
+	for !d.foundBreak() {
+		_, _, val = d.getHead()
+		b = append(b, d.data[d.off:d.off+int(val)]...)
+		d.off += int(val)
+	}
+	return b, true
+}
+
+// applyByteStringTextConversion converts bytes read from a byte string to or from a configured text
+// encoding. If no transformation was performed (because it was not required), the original byte
+// slice is returned and the bool return value is false. Otherwise, a new slice containing the
+// converted bytes is returned along with the bool value true.
+func (d *decoder) applyByteStringTextConversion(
+	src []byte,
+	dstType reflect.Type,
+) (
+	dst []byte,
+	transformed bool,
+	err error,
+) {
+	switch dstType.Kind() {
+	case reflect.String:
+		// Encoding direction: byte string -> Go string, driven by the
+		// innermost enclosing "expected later encoding" tag (21-23).
+		if d.dm.byteStringToString != ByteStringToStringAllowedWithExpectedLaterEncoding || len(d.expectedLaterEncodingTags) == 0 {
+			return src, false, nil
+		}
+
+		switch d.expectedLaterEncodingTags[len(d.expectedLaterEncodingTags)-1] {
+		case tagNumExpectedLaterEncodingBase64URL:
+			encoded := make([]byte, base64.RawURLEncoding.EncodedLen(len(src)))
+			base64.RawURLEncoding.Encode(encoded, src)
+			return encoded, true, nil
+
+		case tagNumExpectedLaterEncodingBase64:
+			encoded := make([]byte, base64.StdEncoding.EncodedLen(len(src)))
+			base64.StdEncoding.Encode(encoded, src)
+			return encoded, true, nil
+
+		case tagNumExpectedLaterEncodingBase16:
+			encoded := make([]byte, hex.EncodedLen(len(src)))
+			hex.Encode(encoded, src)
+			return encoded, true, nil
+
+		default:
+			// If this happens, there is a bug: the decoder has pushed an invalid
+			// "expected later encoding" tag to the stack.
+			panic(fmt.Sprintf("unrecognized expected later encoding tag: %d", d.expectedLaterEncodingTags))
+		}
+
+	case reflect.Slice:
+		// Decoding direction: text-encoded byte string -> Go []byte, driven
+		// by the configured expected format.
+		if dstType.Elem().Kind() != reflect.Uint8 || len(d.expectedLaterEncodingTags) > 0 {
+			// Either the destination is not a slice of bytes, or the encoder that
+			// produced the input indicated an expected text encoding tag and therefore
+			// the content of the byte string has NOT been text encoded.
+			return src, false, nil
+		}
+
+		switch d.dm.byteStringExpectedFormat {
+		case ByteStringExpectedBase64URL:
+			decoded := make([]byte, base64.RawURLEncoding.DecodedLen(len(src)))
+			n, err := base64.RawURLEncoding.Decode(decoded, src)
+			if err != nil {
+				return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64URL, err)
+			}
+			return decoded[:n], true, nil
+
+		case ByteStringExpectedBase64:
+			decoded := make([]byte, base64.StdEncoding.DecodedLen(len(src)))
+			n, err := base64.StdEncoding.Decode(decoded, src)
+			if err != nil {
+				return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64, err)
+			}
+			return decoded[:n], true, nil
+
+		case ByteStringExpectedBase16:
+			decoded := make([]byte, hex.DecodedLen(len(src)))
+			n, err := hex.Decode(decoded, src)
+			if err != nil {
+				return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase16, err)
+			}
+			return decoded[:n], true, nil
+		}
+	}
+
+	return src, false, nil
+}
+
+// parseTextString parses CBOR encoded text string. It returns a byte slice
+// to prevent creating an extra copy of string. Caller should wrap returned
+// byte slice as string when needed.
+func (d *decoder) parseTextString() ([]byte, error) {
+	_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+	if !indefiniteLength {
+		b := d.data[d.off : d.off+int(val)]
+		d.off += int(val)
+		if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) {
+			return nil, &SemanticError{"cbor: invalid UTF-8 string"}
+		}
+		return b, nil
+	}
+	// Process indefinite length string chunks.
+	b := []byte{}
+	for !d.foundBreak() {
+		_, _, val = d.getHead()
+		x := d.data[d.off : d.off+int(val)]
+		d.off += int(val)
+		if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) {
+			// Consume the remaining chunks so the decoder is left positioned
+			// at the next data item even though this string is rejected.
+			for !d.foundBreak() {
+				d.skip() // Skip remaining chunk on error
+			}
+			return nil, &SemanticError{"cbor: invalid UTF-8 string"}
+		}
+		b = append(b, x...)
+	}
+	return b, nil
+}
+
+// parseArray decodes a CBOR array into a []any. The first error encountered
+// is recorded and returned, but decoding continues so the whole array is
+// consumed and well-decoded elements are kept.
+func (d *decoder) parseArray() ([]any, error) {
+	_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+	hasSize := !indefiniteLength
+	count := int(val)
+	if !hasSize {
+		count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance
+	}
+	v := make([]any, count)
+	var e any
+	var err, lastErr error
+	for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+		if e, lastErr = d.parse(true); lastErr != nil {
+			if err == nil {
+				err = lastErr
+			}
+			// Leave v[i] as nil for the element that failed to decode.
+			continue
+		}
+		v[i] = e
+	}
+	return v, err
+}
+
+// parseArrayToSlice decodes a CBOR array into the Go slice v, reallocating
+// only when v is nil or lacks capacity. The first element error is recorded
+// and returned after the whole array has been consumed.
+func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error {
+	_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+	hasSize := !indefiniteLength
+	count := int(val)
+	if !hasSize {
+		count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance
+	}
+	// count == 0 also allocates, so an empty CBOR array yields a non-nil
+	// empty slice rather than leaving a stale nil/old slice in place.
+	if v.IsNil() || v.Cap() < count || count == 0 {
+		v.Set(reflect.MakeSlice(tInfo.nonPtrType, count, count))
+	}
+	v.SetLen(count)
+	var err error
+	for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+		if lastErr := d.parseToValue(v.Index(i), tInfo.elemTypeInfo); lastErr != nil {
+			if err == nil {
+				err = lastErr
+			}
+		}
+	}
+	return err
+}
+
+// parseArrayToArray decodes a CBOR array into the fixed-size Go array v.
+// Extra CBOR elements beyond the Go array length are skipped; unfilled Go
+// elements are zeroed. The first element error is recorded and returned.
+func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error {
+	_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+	hasSize := !indefiniteLength
+	count := int(val)
+	gi := 0
+	vLen := v.Len()
+	var err error
+	for ci := 0; (hasSize && ci < count) || (!hasSize && !d.foundBreak()); ci++ {
+		if gi < vLen {
+			// Read CBOR array element and set array element
+			if lastErr := d.parseToValue(v.Index(gi), tInfo.elemTypeInfo); lastErr != nil {
+				if err == nil {
+					err = lastErr
+				}
+			}
+			gi++
+		} else {
+			d.skip() // Skip remaining CBOR array element
+		}
+	}
+	// Set remaining Go array elements to zero values.
+	if gi < vLen {
+		for ; gi < vLen; gi++ {
+			v.Index(gi).SetZero()
+		}
+	}
+	return err
+}
+
+// parseMap decodes a CBOR map into a map[any]any. The first error encountered
+// (unparseable key, unhashable key, or bad value) is recorded while the rest
+// of the map is still consumed, so the decoder ends at the next data item.
+func (d *decoder) parseMap() (any, error) {
+	_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+	hasSize := !indefiniteLength
+	count := int(val)
+	m := make(map[any]any)
+	var k, e any
+	var err, lastErr error
+	keyCount := 0
+	for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+		// Parse CBOR map key.
+		if k, lastErr = d.parse(true); lastErr != nil {
+			if err == nil {
+				err = lastErr
+			}
+			d.skip()
+			continue
+		}
+
+		// Detect if CBOR map key can be used as Go map key.
+		rv := reflect.ValueOf(k)
+		if !isHashableValue(rv) {
+			var converted bool
+			if d.dm.mapKeyByteString == MapKeyByteStringAllowed {
+				k, converted = convertByteSliceToByteString(k)
+			}
+			if !converted {
+				if err == nil {
+					err = &InvalidMapKeyTypeError{rv.Type().String()}
+				}
+				d.skip()
+				continue
+			}
+		}
+
+		// Parse CBOR map value.
+		if e, lastErr = d.parse(true); lastErr != nil {
+			if err == nil {
+				err = lastErr
+			}
+			continue
+		}
+
+		// Add key-value pair to Go map.
+		m[k] = e
+
+		// Detect duplicate map key.
+		if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+			// If the map did not grow, this key was a duplicate: zero the
+			// entry, record the error, and skip the remaining entries.
+			newKeyCount := len(m)
+			if newKeyCount == keyCount {
+				m[k] = nil
+				err = &DupMapKeyError{k, i}
+				i++
+				// skip the rest of the map
+				for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+					d.skip() // Skip map key
+					d.skip() // Skip map value
+				}
+				return m, err
+			}
+			keyCount = newKeyCount
+		}
+	}
+	return m, err
+}
+
+// parseMapToMap decodes a CBOR map into the typed Go map v, allocating it if
+// nil. Key and value scratch reflect.Values are reused across iterations when
+// their kinds are immutable. Under DupMapKeyEnforcedAPF, a duplicate key zeroes
+// the entry, skips the remaining entries, and returns DupMapKeyError.
+func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
+	_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+	hasSize := !indefiniteLength
+	count := int(val)
+	if v.IsNil() {
+		mapsize := count
+		if !hasSize {
+			// Indefinite length: the head carries no size hint.
+			mapsize = 0
+		}
+		v.Set(reflect.MakeMapWithSize(tInfo.nonPtrType, mapsize))
+	}
+	keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ
+	reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind)
+	var keyValue, eleValue reflect.Value
+	keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable.
+	var err, lastErr error
+	keyCount := v.Len()
+	var existingKeys map[any]bool // Store existing map keys, used for detecting duplicate map key.
+	if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+		existingKeys = make(map[any]bool, keyCount)
+		if keyCount > 0 {
+			vKeys := v.MapKeys()
+			for i := 0; i < len(vKeys); i++ {
+				existingKeys[vKeys[i].Interface()] = true
+			}
+		}
+	}
+	for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+		// Parse CBOR map key.
+		if !keyValue.IsValid() {
+			keyValue = reflect.New(keyType).Elem()
+		} else if !reuseKey {
+			keyValue.SetZero()
+		}
+		if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil {
+			if err == nil {
+				err = lastErr
+			}
+			d.skip()
+			continue
+		}
+
+		// Detect if CBOR map key can be used as Go map key.
+		if keyIsInterfaceType && keyValue.Elem().IsValid() {
+			if !isHashableValue(keyValue.Elem()) {
+				var converted bool
+				if d.dm.mapKeyByteString == MapKeyByteStringAllowed {
+					var k any
+					k, converted = convertByteSliceToByteString(keyValue.Elem().Interface())
+					if converted {
+						keyValue.Set(reflect.ValueOf(k))
+					}
+				}
+				if !converted {
+					if err == nil {
+						err = &InvalidMapKeyTypeError{keyValue.Elem().Type().String()}
+					}
+					d.skip()
+					continue
+				}
+			}
+		}
+
+		// Parse CBOR map value.
+		if !eleValue.IsValid() {
+			eleValue = reflect.New(eleType).Elem()
+		} else if !reuseEle {
+			eleValue.SetZero()
+		}
+		if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil {
+			if err == nil {
+				err = lastErr
+			}
+			continue
+		}
+
+		// Add key-value pair to Go map.
+		v.SetMapIndex(keyValue, eleValue)
+
+		// Detect duplicate map key.
+		if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+			newKeyCount := v.Len()
+			if newKeyCount == keyCount {
+				kvi := keyValue.Interface()
+				// Keys that already existed in the destination map before this
+				// decode are allowed to be overwritten exactly once.
+				if !existingKeys[kvi] {
+					v.SetMapIndex(keyValue, reflect.New(eleType).Elem())
+					err = &DupMapKeyError{kvi, i}
+					i++
+					// skip the rest of the map
+					for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+						d.skip() // skip map key
+						d.skip() // skip map value
+					}
+					return err
+				}
+				delete(existingKeys, kvi)
+			}
+			keyCount = newKeyCount
+		}
+	}
+	return err
+}
+
+// parseArrayToStruct decodes a CBOR array into a struct that uses the
+// "toarray" option, matching elements to fields positionally. The element
+// count must equal the field count exactly; otherwise the array is skipped
+// and an UnmarshalTypeError is returned.
+func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error {
+	structType := getDecodingStructType(tInfo.nonPtrType)
+	if structType.err != nil {
+		return structType.err
+	}
+
+	if !structType.toArray {
+		t := d.nextCBORType()
+		d.skip()
+		return &UnmarshalTypeError{
+			CBORType: t.String(),
+			GoType:   tInfo.nonPtrType.String(),
+			errorMsg: "cannot decode CBOR array to struct without toarray option",
+		}
+	}
+
+	start := d.off
+	_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+	hasSize := !indefiniteLength
+	count := int(val)
+	if !hasSize {
+		count = d.numOfItemsUntilBreak() // peek ahead to get array size
+	}
+	if count != len(structType.fields) {
+		// Rewind so skip() consumes the whole array including its head.
+		d.off = start
+		d.skip()
+		return &UnmarshalTypeError{
+			CBORType: cborTypeArray.String(),
+			GoType:   tInfo.typ.String(),
+			errorMsg: "cannot decode CBOR array to struct with different number of elements",
+		}
+	}
+	var err, lastErr error
+	for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+		f := structType.fields[i]
+
+		// Get field value by index
+		var fv reflect.Value
+		if len(f.idx) == 1 {
+			fv = v.Field(f.idx[0])
+		} else {
+			fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) {
+				// Return a new value for embedded field null pointer to point to, or return error.
+				if !v.CanSet() {
+					return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String())
+				}
+				v.Set(reflect.New(v.Type().Elem()))
+				return v, nil
+			})
+			if lastErr != nil && err == nil {
+				err = lastErr
+			}
+			if !fv.IsValid() {
+				d.skip()
+				continue
+			}
+		}
+
+		if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil {
+			if err == nil {
+				if typeError, ok := lastErr.(*UnmarshalTypeError); ok {
+					typeError.StructFieldName = tInfo.typ.String() + "." + f.name
+					err = typeError
+				} else {
+					err = lastErr
+				}
+			}
+		}
+	}
+	return err
+}
+
+// parseMapToStruct needs to be fast so gocyclo can be ignored for now.
+//
+// parseMapToStruct decodes a CBOR map into struct v, matching map keys to
+// struct fields by text/byte-string name (exact, then optionally
+// case-insensitive) or by integer key (keyasint). It tracks matched fields to
+// detect duplicate keys under DupMapKeyEnforcedAPF and optionally errors on
+// unknown fields. The first field error is recorded; the map is always fully
+// consumed unless a fatal duplicate/unknown-key error causes an early skip.
+func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
+	structType := getDecodingStructType(tInfo.nonPtrType)
+	if structType.err != nil {
+		return structType.err
+	}
+
+	if structType.toArray {
+		t := d.nextCBORType()
+		d.skip()
+		return &UnmarshalTypeError{
+			CBORType: t.String(),
+			GoType:   tInfo.nonPtrType.String(),
+			errorMsg: "cannot decode CBOR map to struct with toarray option",
+		}
+	}
+
+	var err, lastErr error
+
+	// Get CBOR map size
+	_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+	hasSize := !indefiniteLength
+	count := int(val)
+
+	// Keeps track of matched struct fields
+	var foundFldIdx []bool
+	{
+		const maxStackFields = 128
+		if nfields := len(structType.fields); nfields <= maxStackFields {
+			// For structs with typical field counts, expect that this can be
+			// stack-allocated.
+			var a [maxStackFields]bool
+			foundFldIdx = a[:nfields]
+		} else {
+			foundFldIdx = make([]bool, len(structType.fields))
+		}
+	}
+
+	// Keeps track of CBOR map keys to detect duplicate map key
+	keyCount := 0
+	var mapKeys map[any]struct{}
+
+	errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0
+
+MapEntryLoop:
+	for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+		var f *field
+
+		// If duplicate field detection is enabled and the key at index j did not match any
+		// field, k will hold the map key.
+		var k any
+
+		t := d.nextCBORType()
+		if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) {
+			var keyBytes []byte
+			if t == cborTypeTextString {
+				keyBytes, lastErr = d.parseTextString()
+				if lastErr != nil {
+					if err == nil {
+						err = lastErr
+					}
+					d.skip() // skip value
+					continue
+				}
+			} else { // cborTypeByteString
+				keyBytes, _ = d.parseByteString()
+			}
+
+			// Check for exact match on field name.
+			if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok {
+				fld := structType.fields[i]
+
+				if !foundFldIdx[i] {
+					f = fld
+					foundFldIdx[i] = true
+				} else if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+					err = &DupMapKeyError{fld.name, j}
+					d.skip() // skip value
+					j++
+					// skip the rest of the map
+					for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+						d.skip()
+						d.skip()
+					}
+					return err
+				} else {
+					// discard repeated match
+					d.skip()
+					continue MapEntryLoop
+				}
+			}
+
+			// Find field with case-insensitive match
+			if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive {
+				keyLen := len(keyBytes)
+				keyString := string(keyBytes)
+				for i := 0; i < len(structType.fields); i++ {
+					fld := structType.fields[i]
+					if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) {
+						if !foundFldIdx[i] {
+							f = fld
+							foundFldIdx[i] = true
+						} else if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+							err = &DupMapKeyError{keyString, j}
+							d.skip() // skip value
+							j++
+							// skip the rest of the map
+							for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+								d.skip()
+								d.skip()
+							}
+							return err
+						} else {
+							// discard repeated match
+							d.skip()
+							continue MapEntryLoop
+						}
+						break
+					}
+				}
+			}
+
+			if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil {
+				k = string(keyBytes)
+			}
+		} else if t <= cborTypeNegativeInt { // uint/int
+			var nameAsInt int64
+
+			if t == cborTypePositiveInt {
+				_, _, val := d.getHead()
+				nameAsInt = int64(val)
+			} else {
+				_, _, val := d.getHead()
+				if val > math.MaxInt64 {
+					if err == nil {
+						err = &UnmarshalTypeError{
+							CBORType: t.String(),
+							GoType:   reflect.TypeOf(int64(0)).String(),
+							errorMsg: "-1-" + strconv.FormatUint(val, 10) + " overflows Go's int64",
+						}
+					}
+					d.skip() // skip value
+					continue
+				}
+				// CBOR negative integer is -1 - val; ^val computes that
+				// without intermediate overflow.
+				nameAsInt = int64(-1) ^ int64(val)
+			}
+
+			// Find field
+			for i := 0; i < len(structType.fields); i++ {
+				fld := structType.fields[i]
+				if fld.keyAsInt && fld.nameAsInt == nameAsInt {
+					if !foundFldIdx[i] {
+						f = fld
+						foundFldIdx[i] = true
+					} else if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+						err = &DupMapKeyError{nameAsInt, j}
+						d.skip() // skip value
+						j++
+						// skip the rest of the map
+						for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+							d.skip()
+							d.skip()
+						}
+						return err
+					} else {
+						// discard repeated match
+						d.skip()
+						continue MapEntryLoop
+					}
+					break
+				}
+			}
+
+			if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil {
+				k = nameAsInt
+			}
+		} else {
+			if err == nil {
+				err = &UnmarshalTypeError{
+					CBORType: t.String(),
+					GoType:   reflect.TypeOf("").String(),
+					errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name",
+				}
+			}
+			if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+				// parse key
+				k, lastErr = d.parse(true)
+				if lastErr != nil {
+					d.skip() // skip value
+					continue
+				}
+				// Detect if CBOR map key can be used as Go map key.
+				if !isHashableValue(reflect.ValueOf(k)) {
+					d.skip() // skip value
+					continue
+				}
+			} else {
+				d.skip() // skip key
+			}
+		}
+
+		if f == nil {
+			if errOnUnknownField {
+				err = &UnknownFieldError{j}
+				d.skip() // Skip value
+				j++
+				// skip the rest of the map
+				for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+					d.skip()
+					d.skip()
+				}
+				return err
+			}
+
+			// Two map keys that match the same struct field are immediately considered
+			// duplicates. This check detects duplicates between two map keys that do
+			// not match a struct field. If unknown field errors are enabled, then this
+			// check is never reached.
+			if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+				if mapKeys == nil {
+					mapKeys = make(map[any]struct{}, 1)
+				}
+				mapKeys[k] = struct{}{}
+				newKeyCount := len(mapKeys)
+				if newKeyCount == keyCount {
+					err = &DupMapKeyError{k, j}
+					d.skip() // skip value
+					j++
+					// skip the rest of the map
+					for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+						d.skip()
+						d.skip()
+					}
+					return err
+				}
+				keyCount = newKeyCount
+			}
+
+			d.skip() // Skip value
+			continue
+		}
+
+		// Get field value by index
+		var fv reflect.Value
+		if len(f.idx) == 1 {
+			fv = v.Field(f.idx[0])
+		} else {
+			fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) {
+				// Return a new value for embedded field null pointer to point to, or return error.
+				if !v.CanSet() {
+					return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String())
+				}
+				v.Set(reflect.New(v.Type().Elem()))
+				return v, nil
+			})
+			if lastErr != nil && err == nil {
+				err = lastErr
+			}
+			if !fv.IsValid() {
+				d.skip()
+				continue
+			}
+		}
+
+		if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil {
+			if err == nil {
+				if typeError, ok := lastErr.(*UnmarshalTypeError); ok {
+					typeError.StructFieldName = tInfo.nonPtrType.String() + "." + f.name
+					err = typeError
+				} else {
+					err = lastErr
+				}
+			}
+		}
+	}
+	return err
+}
+
+// validRegisteredTagNums verifies that tag numbers match registered tag numbers of type t.
+// validRegisteredTagNums assumes next CBOR data type is tag. It consumes every
+// nested tag head, stopping at the tag content, and returns a WrongTagError
+// when the observed chain differs from the registration.
+func (d *decoder) validRegisteredTagNums(registeredTag *tagItem) error {
+	// Collect the chain of nested tag numbers until tag content is reached.
+	seen := make([]uint64, 0, 1)
+	for d.nextCBORType() == cborTypeTag {
+		_, _, num := d.getHead()
+		seen = append(seen, num)
+	}
+	if registeredTag.equalTagNum(seen) {
+		return nil
+	}
+	return &WrongTagError{registeredTag.contentType, registeredTag.num, seen}
+}
+
+// getRegisteredTagItem returns the tag item registered for Go type vt, or nil
+// when no tag set is configured or vt has no registration.
+func (d *decoder) getRegisteredTagItem(vt reflect.Type) *tagItem {
+	if d.dm.tags == nil {
+		return nil
+	}
+	return d.dm.tags.getTagItemFromType(vt)
+}
+
+// skip moves data offset to the next item. skip assumes data is well-formed,
+// and does not perform bounds checking.
+func (d *decoder) skip() {
+	t, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+
+	if indefiniteLength {
+		switch t {
+		case cborTypeByteString, cborTypeTextString, cborTypeArray, cborTypeMap:
+			// Recursively skip chunks/elements until the break code.
+			for {
+				if isBreakFlag(d.data[d.off]) {
+					d.off++
+					return
+				}
+				d.skip()
+			}
+		}
+	}
+
+	switch t {
+	case cborTypeByteString, cborTypeTextString:
+		// val is the string length in bytes.
+		d.off += int(val)
+
+	case cborTypeArray:
+		for i := 0; i < int(val); i++ {
+			d.skip()
+		}
+
+	case cborTypeMap:
+		// Each entry is a key item followed by a value item.
+		for i := 0; i < int(val)*2; i++ {
+			d.skip()
+		}
+
+	case cborTypeTag:
+		// Skip the tag content (nested tags recurse naturally).
+		d.skip()
+	}
+}
+
+// getHeadWithIndefiniteLengthFlag reads the next data item head like getHead
+// and additionally reports whether the additional information denotes an
+// indefinite length.
+func (d *decoder) getHeadWithIndefiniteLengthFlag() (
+	t cborType,
+	ai byte,
+	val uint64,
+	indefiniteLength bool,
+) {
+	t, ai, val = d.getHead()
+	return t, ai, val, additionalInformation(ai).isIndefiniteLength()
+}
+
+// getHead reads the initial byte and any argument bytes of the next data
+// item, advancing the offset past them. For additional information values
+// without an argument (including indefinite-length markers), val is the
+// additional information value itself.
+// getHead assumes data is well-formed, and does not perform bounds checking.
+func (d *decoder) getHead() (t cborType, ai byte, val uint64) {
+	t, ai = parseInitialByte(d.data[d.off])
+	val = uint64(ai)
+	d.off++
+
+	switch ai {
+	case additionalInformationWith1ByteArgument:
+		val = uint64(d.data[d.off])
+		d.off++
+
+	case additionalInformationWith2ByteArgument:
+		const argumentSize = 2
+		val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize]))
+		d.off += argumentSize
+
+	case additionalInformationWith4ByteArgument:
+		const argumentSize = 4
+		val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize]))
+		d.off += argumentSize
+
+	case additionalInformationWith8ByteArgument:
+		const argumentSize = 8
+		val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize])
+		d.off += argumentSize
+	}
+	return t, ai, val
+}
+
+// numOfItemsUntilBreak counts the data items before the next break code by
+// scanning ahead, then restores the read offset so nothing is consumed.
+func (d *decoder) numOfItemsUntilBreak() int {
+	mark := d.off
+	n := 0
+	for !d.foundBreak() {
+		d.skip()
+		n++
+	}
+	d.off = mark
+	return n
+}
+
+// foundBreak reports whether the next byte is the CBOR break code, consuming
+// it when present.
+// foundBreak assumes data is well-formed, and does not perform bounds checking.
+func (d *decoder) foundBreak() bool {
+	if !isBreakFlag(d.data[d.off]) {
+		return false
+	}
+	d.off++
+	return true
+}
+
+// reset prepares the decoder to read from data, clearing per-decode state
+// while keeping the capacity of the expected-later-encoding tag stack.
+func (d *decoder) reset(data []byte) {
+	d.data, d.off = data, 0
+	d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:0]
+}
+
+// nextCBORType returns the major type of the next data item without consuming it.
+func (d *decoder) nextCBORType() cborType {
+	return getType(d.data[d.off])
+}
+
+// nextCBORNil reports whether the next data item is CBOR null (0xf6) or
+// undefined (0xf7) without consuming it.
+func (d *decoder) nextCBORNil() bool {
+	b := d.data[d.off]
+	return b == 0xf6 || b == 0xf7
+}
+
+// jsonUnmarshaler mirrors encoding/json.Unmarshaler without importing that package.
+type jsonUnmarshaler interface{ UnmarshalJSON([]byte) error }
+
+// Reflect types used repeatedly during decoding, computed once at package init.
+var (
+	typeIntf                  = reflect.TypeOf([]any(nil)).Elem()
+	typeTime                  = reflect.TypeOf(time.Time{})
+	typeBigInt                = reflect.TypeOf(big.Int{})
+	typeUnmarshaler           = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+	typeUnexportedUnmarshaler = reflect.TypeOf((*unmarshaler)(nil)).Elem()
+	typeBinaryUnmarshaler     = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
+	typeTextUnmarshaler       = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+	typeJSONUnmarshaler       = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
+	typeString                = reflect.TypeOf("")
+	typeByteSlice             = reflect.TypeOf([]byte(nil))
+)
+
+// fillNil sets v to its zero value when v can represent nil (slice, map,
+// interface, or pointer); all other kinds are left untouched. It never fails.
+func fillNil(_ cborType, v reflect.Value) error {
+	k := v.Kind()
+	if k == reflect.Slice || k == reflect.Map || k == reflect.Interface || k == reflect.Pointer {
+		v.SetZero()
+	}
+	return nil
+}
+
+// fillPositiveInt stores an unsigned CBOR integer val into v, which may be a
+// signed or unsigned integer, a float, or big.Int. Overflow of the destination
+// type produces an UnmarshalTypeError rather than silent truncation.
+func fillPositiveInt(t cborType, val uint64, v reflect.Value) error {
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		// Reject values above MaxInt64 before converting to int64.
+		if val > math.MaxInt64 {
+			return &UnmarshalTypeError{
+				CBORType: t.String(),
+				GoType:   v.Type().String(),
+				errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(),
+			}
+		}
+		if v.OverflowInt(int64(val)) {
+			return &UnmarshalTypeError{
+				CBORType: t.String(),
+				GoType:   v.Type().String(),
+				errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(),
+			}
+		}
+		v.SetInt(int64(val))
+		return nil
+
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		if v.OverflowUint(val) {
+			return &UnmarshalTypeError{
+				CBORType: t.String(),
+				GoType:   v.Type().String(),
+				errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(),
+			}
+		}
+		v.SetUint(val)
+		return nil
+
+	case reflect.Float32, reflect.Float64:
+		f := float64(val)
+		v.SetFloat(f)
+		return nil
+	}
+
+	if v.Type() == typeBigInt {
+		i := new(big.Int).SetUint64(val)
+		v.Set(reflect.ValueOf(*i))
+		return nil
+	}
+	return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+// fillNegativeInt stores a negative CBOR integer val into v, which may be a
+// signed integer, a float, or big.Int. Unsigned destinations are rejected with
+// an UnmarshalTypeError, as is signed overflow.
+func fillNegativeInt(t cborType, val int64, v reflect.Value) error {
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if v.OverflowInt(val) {
+			return &UnmarshalTypeError{
+				CBORType: t.String(),
+				GoType:   v.Type().String(),
+				errorMsg: strconv.FormatInt(val, 10) + " overflows " + v.Type().String(),
+			}
+		}
+		v.SetInt(val)
+		return nil
+
+	case reflect.Float32, reflect.Float64:
+		f := float64(val)
+		v.SetFloat(f)
+		return nil
+	}
+	if v.Type() == typeBigInt {
+		i := new(big.Int).SetInt64(val)
+		v.Set(reflect.ValueOf(*i))
+		return nil
+	}
+	return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+// fillBool stores val into v when v is a bool; otherwise it returns an
+// UnmarshalTypeError describing the mismatch.
+func fillBool(t cborType, val bool, v reflect.Value) error {
+	if v.Kind() != reflect.Bool {
+		return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+	}
+	v.SetBool(val)
+	return nil
+}
+
+// fillFloat stores val into v when v is a float32 or float64, returning an
+// UnmarshalTypeError when the value overflows the destination (float32) or
+// when v is not a float kind.
+func fillFloat(t cborType, val float64, v reflect.Value) error {
+	switch v.Kind() {
+	case reflect.Float32, reflect.Float64:
+		if v.OverflowFloat(val) {
+			return &UnmarshalTypeError{
+				CBORType: t.String(),
+				GoType:   v.Type().String(),
+				errorMsg: strconv.FormatFloat(val, 'E', -1, 64) + " overflows " + v.Type().String(),
+			}
+		}
+		v.SetFloat(val)
+		return nil
+	}
+	return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+// fillByteString stores the CBOR byte string val into v, honoring the
+// configured BinaryUnmarshaler, TextUnmarshaler, and byte-string-to-string
+// modes. shared reports whether val aliases the decoder's input buffer; when
+// true, a private copy is made before handing ownership to a byte slice.
+// Destinations may be an unmarshaler, a string, a []byte, or a [N]byte.
+func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode, tum TextUnmarshalerMode) error {
+	if bum == BinaryUnmarshalerByteString && reflect.PointerTo(v.Type()).Implements(typeBinaryUnmarshaler) {
+		if v.CanAddr() {
+			v = v.Addr()
+			if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok {
+				// The contract of BinaryUnmarshaler forbids
+				// retaining the input bytes, so no copying is
+				// required even if val is shared.
+				return u.UnmarshalBinary(val)
+			}
+		}
+		return errors.New("cbor: cannot set new value for " + v.Type().String())
+	}
+	if bsts != ByteStringToStringForbidden {
+		if tum == TextUnmarshalerTextString && reflect.PointerTo(v.Type()).Implements(typeTextUnmarshaler) {
+			if v.CanAddr() {
+				v = v.Addr()
+				if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+					// The contract of TextUnmarshaler forbids retaining the input
+					// bytes, so no copying is required even if val is shared.
+					if err := u.UnmarshalText(val); err != nil {
+						return fmt.Errorf("cbor: cannot unmarshal text for %s: %w", v.Type(), err)
+					}
+					return nil
+				}
+			}
+			return errors.New("cbor: cannot set new value for " + v.Type().String())
+		}
+
+		if v.Kind() == reflect.String {
+			// string(val) copies, so sharing val is safe here.
+			v.SetString(string(val))
+			return nil
+		}
+	}
+	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
+		src := val
+		if shared {
+			// SetBytes shares the underlying bytes of the source slice.
+			src = make([]byte, len(val))
+			copy(src, val)
+		}
+		v.SetBytes(src)
+		return nil
+	}
+	if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 {
+		vLen := v.Len()
+		i := 0
+		for ; i < vLen && i < len(val); i++ {
+			v.Index(i).SetUint(uint64(val[i]))
+		}
+		// Set remaining Go array elements to zero values.
+		if i < vLen {
+			for ; i < vLen; i++ {
+				v.Index(i).SetZero()
+			}
+		}
+		return nil
+	}
+	return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+// fillTextString stores the CBOR text string val into v, which must either
+// implement encoding.TextUnmarshaler (when tum allows it) or be a Go string;
+// anything else yields an UnmarshalTypeError.
+func fillTextString(t cborType, val []byte, v reflect.Value, tum TextUnmarshalerMode) error {
+	// Check if the value implements TextUnmarshaler and the mode allows it
+	if tum == TextUnmarshalerTextString && reflect.PointerTo(v.Type()).Implements(typeTextUnmarshaler) {
+		if v.CanAddr() {
+			v = v.Addr()
+			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+				// The contract of TextUnmarshaler forbids retaining the input
+				// bytes, so no copying is required even if val is shared.
+				if err := u.UnmarshalText(val); err != nil {
+					return fmt.Errorf("cbor: cannot unmarshal text for %s: %w", v.Type(), err)
+				}
+				return nil
+			}
+		}
+		return errors.New("cbor: cannot set new value for " + v.Type().String())
+	}
+
+	if v.Kind() == reflect.String {
+		v.SetString(string(val))
+		return nil
+	}
+
+	return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+// isImmutableKind reports whether values of kind k are immutable (booleans,
+// integers, floats, and strings) and can therefore be decoded repeatedly into
+// the same reflect.Value without zeroing it between uses.
+func isImmutableKind(k reflect.Kind) bool {
+	switch k {
+	case reflect.Bool, reflect.String,
+		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+		reflect.Float32, reflect.Float64:
+		return true
+	}
+	return false
+}
+
+// isHashableValue reports whether rv can be used as a Go map key. Slices,
+// maps, functions, and big.Int are not hashable; a Tag is hashable only if
+// its content is (checked recursively).
+func isHashableValue(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Slice, reflect.Map, reflect.Func:
+		return false
+
+	case reflect.Struct:
+		switch rv.Type() {
+		case typeTag:
+			tag := rv.Interface().(Tag)
+			return isHashableValue(reflect.ValueOf(tag.Content))
+		case typeBigInt:
+			// big.Int contains a slice, so equality (and hashing) is undefined.
+			return false
+		}
+	}
+	return true
+}
+
+// convertByteSliceToByteString converts []byte to ByteString if
+// - v is []byte type, or
+// - v is Tag type and tag content type is []byte
+// This function also handles nested tags.
+// CBOR data is already verified to be well-formed before this function is used,
+// so the recursion won't exceed max nested levels.
+func convertByteSliceToByteString(v any) (any, bool) {
+ switch v := v.(type) {
+ case []byte:
+ return ByteString(v), true
+
+ case Tag:
+ content, converted := convertByteSliceToByteString(v.Content)
+ if converted {
+ return Tag{Number: v.Number, Content: content}, true
+ }
+ }
+ return v, false
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/vendor/github.com/fxamacker/cbor/v2/diagnose.go
new file mode 100644
index 0000000000..44afb86608
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/diagnose.go
@@ -0,0 +1,724 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "strconv"
+ "unicode/utf16"
+ "unicode/utf8"
+
+ "github.com/x448/float16"
+)
+
+// DiagMode is the main interface for CBOR diagnostic notation.
+type DiagMode interface {
+ // Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode.
+ Diagnose([]byte) (string, error)
+
+ // DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
+ DiagnoseFirst([]byte) (string, []byte, error)
+
+ // DiagOptions returns user specified options used to create this DiagMode.
+ DiagOptions() DiagOptions
+}
+
+// ByteStringEncoding specifies the base encoding that byte strings are notated.
+type ByteStringEncoding uint8
+
+const (
+ // ByteStringBase16Encoding encodes byte strings in base16, without padding.
+ ByteStringBase16Encoding ByteStringEncoding = iota
+
+ // ByteStringBase32Encoding encodes byte strings in base32, without padding.
+ ByteStringBase32Encoding
+
+ // ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding.
+ ByteStringBase32HexEncoding
+
+ // ByteStringBase64Encoding encodes byte strings in base64url, without padding.
+ ByteStringBase64Encoding
+
+ maxByteStringEncoding
+)
+
+// valid returns an error if bse is not one of the predefined
+// ByteStringEncoding constants.
+func (bse ByteStringEncoding) valid() error {
+	if bse >= maxByteStringEncoding {
+		return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse)))
+	}
+	return nil
+}
+
+// DiagOptions specifies Diag options.
+type DiagOptions struct {
+ // ByteStringEncoding specifies the base encoding that byte strings are notated.
+ // Default is ByteStringBase16Encoding.
+ ByteStringEncoding ByteStringEncoding
+
+ // ByteStringHexWhitespace specifies notating with whitespace in byte string
+ // when ByteStringEncoding is ByteStringBase16Encoding.
+ ByteStringHexWhitespace bool
+
+ // ByteStringText specifies notating with text in byte string
+ // if it is a valid UTF-8 text.
+ ByteStringText bool
+
+ // ByteStringEmbeddedCBOR specifies notating embedded CBOR in byte string
+ // if it is a valid CBOR bytes.
+ ByteStringEmbeddedCBOR bool
+
+ // CBORSequence specifies notating CBOR sequences.
+	// If false, an error is returned when there are more bytes after the first CBOR data item.
+ CBORSequence bool
+
+ // FloatPrecisionIndicator specifies appending a suffix to indicate float precision.
+ // Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators.
+ FloatPrecisionIndicator bool
+
+ // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags.
+ // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can
+ // require larger amounts of stack to deserialize. Don't increase this higher than you require.
+ MaxNestedLevels int
+
+ // MaxArrayElements specifies the max number of elements for CBOR arrays.
+ // Default is 128*1024=131072 and it can be set to [16, 2147483647]
+ MaxArrayElements int
+
+ // MaxMapPairs specifies the max number of key-value pairs for CBOR maps.
+ // Default is 128*1024=131072 and it can be set to [16, 2147483647]
+ MaxMapPairs int
+}
+
+// DiagMode returns a DiagMode with immutable options.
+func (opts DiagOptions) DiagMode() (DiagMode, error) {
+ return opts.diagMode()
+}
+
+// diagMode validates opts and builds the immutable *diagMode backing
+// DiagOptions.DiagMode. The size-limit options are validated by constructing
+// the embedded decoding mode.
+func (opts DiagOptions) diagMode() (*diagMode, error) {
+	if err := opts.ByteStringEncoding.valid(); err != nil {
+		return nil, err
+	}
+
+	decMode, err := DecOptions{
+		MaxNestedLevels:  opts.MaxNestedLevels,
+		MaxArrayElements: opts.MaxArrayElements,
+		MaxMapPairs:      opts.MaxMapPairs,
+	}.decMode()
+	if err != nil {
+		return nil, err
+	}
+
+	return &diagMode{
+		byteStringEncoding:      opts.ByteStringEncoding,
+		byteStringHexWhitespace: opts.ByteStringHexWhitespace,
+		byteStringText:          opts.ByteStringText,
+		byteStringEmbeddedCBOR:  opts.ByteStringEmbeddedCBOR,
+		cborSequence:            opts.CBORSequence,
+		floatPrecisionIndicator: opts.FloatPrecisionIndicator,
+		decMode:                 decMode,
+	}, nil
+}
+
+type diagMode struct {
+ byteStringEncoding ByteStringEncoding
+ byteStringHexWhitespace bool
+ byteStringText bool
+ byteStringEmbeddedCBOR bool
+ cborSequence bool
+ floatPrecisionIndicator bool
+ decMode *decMode
+}
+
+// DiagOptions returns user specified options used to create this DiagMode.
+// The size limits are read back from the embedded decoding mode; the returned
+// struct is a copy and mutating it does not affect dm.
+func (dm *diagMode) DiagOptions() DiagOptions {
+	return DiagOptions{
+		ByteStringEncoding:      dm.byteStringEncoding,
+		ByteStringHexWhitespace: dm.byteStringHexWhitespace,
+		ByteStringText:          dm.byteStringText,
+		ByteStringEmbeddedCBOR:  dm.byteStringEmbeddedCBOR,
+		CBORSequence:            dm.cborSequence,
+		FloatPrecisionIndicator: dm.floatPrecisionIndicator,
+		MaxNestedLevels:         dm.decMode.maxNestedLevels,
+		MaxArrayElements:        dm.decMode.maxArrayElements,
+		MaxMapPairs:             dm.decMode.maxMapPairs,
+	}
+}
+
+// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode.
+func (dm *diagMode) Diagnose(data []byte) (string, error) {
+ return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence)
+}
+
+// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
+func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
+ return newDiagnose(data, dm.decMode, dm).diagFirst()
+}
+
+var defaultDiagMode, _ = DiagOptions{}.diagMode()
+
+// Diagnose returns extended diagnostic notation (EDN) of CBOR data items
+// using the default diagnostic mode.
+//
+// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation.
+func Diagnose(data []byte) (string, error) {
+ return defaultDiagMode.Diagnose(data)
+}
+
+// Diagnose returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
+func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
+ return defaultDiagMode.DiagnoseFirst(data)
+}
+
+type diagnose struct {
+ dm *diagMode
+ d *decoder
+ w *bytes.Buffer
+}
+
+// newDiagnose returns a diagnose that translates data to EDN, using decode
+// mode decm for parsing and diag mode diagm for notation choices.
+func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose {
+	return &diagnose{
+		dm: diagm,
+		d:  &decoder{data: data, dm: decm},
+		w:  &bytes.Buffer{},
+	}
+}
+
+// diag writes EDN for every data item in the input, separating items of a
+// CBOR sequence with ", ". When cborSequence is false, trailing bytes after
+// the first item surface as an error from the wellformed check.
+func (di *diagnose) diag(cborSequence bool) (string, error) {
+	// CBOR Sequence
+	firstItem := true
+	for {
+		switch err := di.wellformed(cborSequence); err {
+		case nil:
+			if !firstItem {
+				di.w.WriteString(", ")
+			}
+			firstItem = false
+			if itemErr := di.item(); itemErr != nil {
+				return di.w.String(), itemErr
+			}
+
+		case io.EOF:
+			// EOF before any item means the input was empty: report it.
+			if firstItem {
+				return di.w.String(), err
+			}
+			return di.w.String(), nil
+
+		default:
+			return di.w.String(), err
+		}
+	}
+}
+
+// diagFirst translates only the first CBOR data item, returning its EDN and
+// the remaining undecoded bytes. On error, rest is nil and the partial EDN
+// written so far is still returned.
+func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) {
+	err = di.wellformed(true)
+	if err == nil {
+		err = di.item()
+	}
+
+	if err == nil {
+		// Return EDN and the rest of the data slice (which might be len 0)
+		return di.w.String(), di.d.data[di.d.off:], nil
+	}
+
+	return di.w.String(), nil, err
+}
+
+// wellformed checks that the next data item is well-formed without consuming
+// it: the decoder offset is saved and restored around the check.
+func (di *diagnose) wellformed(allowExtraData bool) error {
+	off := di.d.off
+	err := di.d.wellformed(allowExtraData, false)
+	di.d.off = off
+	return err
+}
+
+// item writes EDN for the single data item at the current decoder offset,
+// recursing for the elements of arrays, maps, tags, and indefinite-length
+// chunks. The input has already been checked for well-formedness, so heads
+// and nesting depth are trusted here.
+func (di *diagnose) item() error { //nolint:gocyclo
+	initialByte := di.d.data[di.d.off]
+	switch initialByte {
+	case cborByteStringWithIndefiniteLengthHead,
+		cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string
+		di.d.off++
+		if isBreakFlag(di.d.data[di.d.off]) {
+			di.d.off++
+			switch initialByte {
+			case cborByteStringWithIndefiniteLengthHead:
+				// indefinite-length bytes with no chunks.
+				di.w.WriteString(`''_`)
+				return nil
+			case cborTextStringWithIndefiniteLengthHead:
+				// indefinite-length text with no chunks.
+				di.w.WriteString(`""_`)
+				return nil
+			}
+		}
+
+		di.w.WriteString("(_ ")
+
+		i := 0
+		for !di.d.foundBreak() {
+			if i > 0 {
+				di.w.WriteString(", ")
+			}
+
+			i++
+			// wellformedIndefiniteString() already checked that the next item is a byte/text string.
+			if err := di.item(); err != nil {
+				return err
+			}
+		}
+
+		di.w.WriteByte(')')
+		return nil
+
+	case cborArrayWithIndefiniteLengthHead: // indefinite-length array
+		di.d.off++
+		di.w.WriteString("[_ ")
+
+		i := 0
+		for !di.d.foundBreak() {
+			if i > 0 {
+				di.w.WriteString(", ")
+			}
+
+			i++
+			if err := di.item(); err != nil {
+				return err
+			}
+		}
+
+		di.w.WriteByte(']')
+		return nil
+
+	case cborMapWithIndefiniteLengthHead: // indefinite-length map
+		di.d.off++
+		di.w.WriteString("{_ ")
+
+		i := 0
+		for !di.d.foundBreak() {
+			if i > 0 {
+				di.w.WriteString(", ")
+			}
+
+			i++
+			// key
+			if err := di.item(); err != nil {
+				return err
+			}
+
+			di.w.WriteString(": ")
+
+			// value
+			if err := di.item(); err != nil {
+				return err
+			}
+		}
+
+		di.w.WriteByte('}')
+		return nil
+	}
+
+	t := di.d.nextCBORType()
+	switch t {
+	case cborTypePositiveInt:
+		_, _, val := di.d.getHead()
+		di.w.WriteString(strconv.FormatUint(val, 10))
+		return nil
+
+	case cborTypeNegativeInt:
+		_, _, val := di.d.getHead()
+		if val > math.MaxInt64 {
+			// CBOR negative integer overflows int64, use big.Int to store value.
+			bi := new(big.Int)
+			bi.SetUint64(val)
+			bi.Add(bi, big.NewInt(1))
+			bi.Neg(bi)
+			di.w.WriteString(bi.String())
+			return nil
+		}
+
+		// ^val equals -1-val in two's complement, with no overflow risk.
+		nValue := int64(-1) ^ int64(val)
+		di.w.WriteString(strconv.FormatInt(nValue, 10))
+		return nil
+
+	case cborTypeByteString:
+		b, _ := di.d.parseByteString()
+		return di.encodeByteString(b)
+
+	case cborTypeTextString:
+		b, err := di.d.parseTextString()
+		if err != nil {
+			return err
+		}
+		return di.encodeTextString(string(b), '"')
+
+	case cborTypeArray:
+		_, _, val := di.d.getHead()
+		count := int(val)
+		di.w.WriteByte('[')
+
+		for i := 0; i < count; i++ {
+			if i > 0 {
+				di.w.WriteString(", ")
+			}
+			if err := di.item(); err != nil {
+				return err
+			}
+		}
+		di.w.WriteByte(']')
+		return nil
+
+	case cborTypeMap:
+		_, _, val := di.d.getHead()
+		count := int(val)
+		di.w.WriteByte('{')
+
+		for i := 0; i < count; i++ {
+			if i > 0 {
+				di.w.WriteString(", ")
+			}
+			// key
+			if err := di.item(); err != nil {
+				return err
+			}
+			di.w.WriteString(": ")
+			// value
+			if err := di.item(); err != nil {
+				return err
+			}
+		}
+		di.w.WriteByte('}')
+		return nil
+
+	case cborTypeTag:
+		_, _, tagNum := di.d.getHead()
+		switch tagNum {
+		case tagNumUnsignedBignum:
+			// Tag 2: unsigned bignum, content must be a byte string.
+			if nt := di.d.nextCBORType(); nt != cborTypeByteString {
+				return newInadmissibleTagContentTypeError(
+					tagNumUnsignedBignum,
+					"byte string",
+					nt.String())
+			}
+
+			b, _ := di.d.parseByteString()
+			bi := new(big.Int).SetBytes(b)
+			di.w.WriteString(bi.String())
+			return nil
+
+		case tagNumNegativeBignum:
+			// Tag 3: negative bignum, encodes -1 - content.
+			if nt := di.d.nextCBORType(); nt != cborTypeByteString {
+				return newInadmissibleTagContentTypeError(
+					tagNumNegativeBignum,
+					"byte string",
+					nt.String(),
+				)
+			}
+
+			b, _ := di.d.parseByteString()
+			bi := new(big.Int).SetBytes(b)
+			bi.Add(bi, big.NewInt(1))
+			bi.Neg(bi)
+			di.w.WriteString(bi.String())
+			return nil
+
+		default:
+			// Other tags notate as number(content).
+			di.w.WriteString(strconv.FormatUint(tagNum, 10))
+			di.w.WriteByte('(')
+			if err := di.item(); err != nil {
+				return err
+			}
+			di.w.WriteByte(')')
+			return nil
+		}
+
+	case cborTypePrimitives:
+		_, ai, val := di.d.getHead()
+		switch ai {
+		case additionalInformationAsFalse:
+			di.w.WriteString("false")
+			return nil
+
+		case additionalInformationAsTrue:
+			di.w.WriteString("true")
+			return nil
+
+		case additionalInformationAsNull:
+			di.w.WriteString("null")
+			return nil
+
+		case additionalInformationAsUndefined:
+			di.w.WriteString("undefined")
+			return nil
+
+		case additionalInformationAsFloat16,
+			additionalInformationAsFloat32,
+			additionalInformationAsFloat64:
+			return di.encodeFloat(ai, val)
+
+		default:
+			di.w.WriteString("simple(")
+			di.w.WriteString(strconv.FormatUint(val, 10))
+			di.w.WriteByte(')')
+			return nil
+		}
+	}
+
+	return nil
+}
+
+// writeU16 formats a rune as an escaped "\uxxxx" sequence: big-endian hex of
+// the low 16 bits, hex-encoded directly into the buffer's spare capacity.
+func (di *diagnose) writeU16(val rune) {
+	di.w.WriteString("\\u")
+	var in [2]byte
+	in[0] = byte(val >> 8)
+	in[1] = byte(val)
+	sz := hex.EncodedLen(len(in))
+	di.w.Grow(sz)
+	dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+	hex.Encode(dst, in[:])
+	di.w.Write(dst)
+}
+
+var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding)
+var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
+
+// encodeByteString notates val according to the mode's ByteStringEncoding:
+// base16 (optionally space-separated), base32, base32hex, or base64url, all
+// without padding. When enabled, non-empty values are first tried as UTF-8
+// text ('-quoted) or as embedded CBOR (<<...>> notation).
+func (di *diagnose) encodeByteString(val []byte) error {
+	if len(val) > 0 {
+		if di.dm.byteStringText && utf8.Valid(val) {
+			return di.encodeTextString(string(val), '\'')
+		}
+
+		if di.dm.byteStringEmbeddedCBOR {
+			di2 := newDiagnose(val, di.dm.decMode, di.dm)
+			// Embedded CBOR is always notated as a CBOR sequence; fall
+			// through to plain base encoding if val is not valid CBOR.
+			if str, err := di2.diag(true); err == nil {
+				di.w.WriteString("<<")
+				di.w.WriteString(str)
+				di.w.WriteString(">>")
+				return nil
+			}
+		}
+	}
+
+	switch di.dm.byteStringEncoding {
+	case ByteStringBase16Encoding:
+		di.w.WriteString("h'")
+		if di.dm.byteStringHexWhitespace {
+			// One space between every pair of hex digits.
+			sz := hex.EncodedLen(len(val))
+			if len(val) > 0 {
+				sz += len(val) - 1
+			}
+			di.w.Grow(sz)
+
+			dst := di.w.Bytes()[di.w.Len():]
+			for i := range val {
+				if i > 0 {
+					dst = append(dst, ' ')
+				}
+				hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1])
+				dst = dst[:len(dst)+2]
+			}
+			di.w.Write(dst)
+		} else {
+			sz := hex.EncodedLen(len(val))
+			di.w.Grow(sz)
+			dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+			hex.Encode(dst, val)
+			di.w.Write(dst)
+		}
+		di.w.WriteByte('\'')
+		return nil
+
+	case ByteStringBase32Encoding:
+		di.w.WriteString("b32'")
+		sz := rawBase32Encoding.EncodedLen(len(val))
+		di.w.Grow(sz)
+		dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+		rawBase32Encoding.Encode(dst, val)
+		di.w.Write(dst)
+		di.w.WriteByte('\'')
+		return nil
+
+	case ByteStringBase32HexEncoding:
+		di.w.WriteString("h32'")
+		sz := rawBase32HexEncoding.EncodedLen(len(val))
+		di.w.Grow(sz)
+		dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+		rawBase32HexEncoding.Encode(dst, val)
+		di.w.Write(dst)
+		di.w.WriteByte('\'')
+		return nil
+
+	case ByteStringBase64Encoding:
+		di.w.WriteString("b64'")
+		sz := base64.RawURLEncoding.EncodedLen(len(val))
+		di.w.Grow(sz)
+		dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+		base64.RawURLEncoding.Encode(dst, val)
+		di.w.Write(dst)
+		di.w.WriteByte('\'')
+		return nil
+
+	default:
+		// It should not be possible for users to construct a *diagMode with an invalid byte
+		// string encoding.
+		panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding))
+	}
+}
+
+const utf16SurrSelf = rune(0x10000)
+
+// encodeTextString writes val wrapped in quote characters (quote should be
+// either `'` or `"`). Tab, newline, carriage return, backslash, and the quote
+// itself get two-character escapes; other non-printable ASCII and all
+// non-ASCII runes are written as \uxxxx (using UTF-16 surrogate pairs for
+// runes above U+FFFF). Invalid UTF-8 returns a SemanticError.
+func (di *diagnose) encodeTextString(val string, quote byte) error {
+	di.w.WriteByte(quote)
+
+	for i := 0; i < len(val); {
+		if b := val[i]; b < utf8.RuneSelf {
+			switch {
+			case b == '\t', b == '\n', b == '\r', b == '\\', b == quote:
+				di.w.WriteByte('\\')
+
+				switch b {
+				case '\t':
+					b = 't'
+				case '\n':
+					b = 'n'
+				case '\r':
+					b = 'r'
+				}
+				di.w.WriteByte(b)
+
+			case b >= ' ' && b <= '~':
+				di.w.WriteByte(b)
+
+			default:
+				di.writeU16(rune(b))
+			}
+
+			i++
+			continue
+		}
+
+		c, size := utf8.DecodeRuneInString(val[i:])
+		switch {
+		case c == utf8.RuneError:
+			// NOTE(review): a literal U+FFFD in otherwise-valid UTF-8 also
+			// decodes to RuneError (size 3) and is rejected here — confirm
+			// that is intended.
+			return &SemanticError{"cbor: invalid UTF-8 string"}
+
+		case c < utf16SurrSelf:
+			di.writeU16(c)
+
+		default:
+			// Rune above U+FFFF needs a UTF-16 surrogate pair.
+			c1, c2 := utf16.EncodeRune(c)
+			di.writeU16(c1)
+			di.writeU16(c2)
+		}
+
+		i += size
+	}
+
+	di.w.WriteByte(quote)
+	return nil
+}
+
+// encodeFloat notates the float value carried in val for the given
+// additional-information byte ai (float16/32/64). NaN and infinities use the
+// EDN literals NaN, Infinity, and -Infinity; finite values use ES6-style
+// number formatting, plus a _1/_2/_3 precision suffix when the mode's
+// FloatPrecisionIndicator is enabled.
+func (di *diagnose) encodeFloat(ai byte, val uint64) error {
+	f64 := float64(0)
+	switch ai {
+	case additionalInformationAsFloat16:
+		f16 := float16.Frombits(uint16(val))
+		switch {
+		case f16.IsNaN():
+			di.w.WriteString("NaN")
+			return nil
+		case f16.IsInf(1):
+			di.w.WriteString("Infinity")
+			return nil
+		case f16.IsInf(-1):
+			di.w.WriteString("-Infinity")
+			return nil
+		default:
+			f64 = float64(f16.Float32())
+		}
+
+	case additionalInformationAsFloat32:
+		f32 := math.Float32frombits(uint32(val))
+		switch {
+		case f32 != f32: // NaN is the only value not equal to itself.
+			di.w.WriteString("NaN")
+			return nil
+		case f32 > math.MaxFloat32: // only +Inf exceeds MaxFloat32
+			di.w.WriteString("Infinity")
+			return nil
+		case f32 < -math.MaxFloat32: // only -Inf is below -MaxFloat32
+			di.w.WriteString("-Infinity")
+			return nil
+		default:
+			f64 = float64(f32)
+		}
+
+	case additionalInformationAsFloat64:
+		f64 = math.Float64frombits(val)
+		switch {
+		case f64 != f64:
+			di.w.WriteString("NaN")
+			return nil
+		case f64 > math.MaxFloat64:
+			di.w.WriteString("Infinity")
+			return nil
+		case f64 < -math.MaxFloat64:
+			di.w.WriteString("-Infinity")
+			return nil
+		}
+	}
+	// Use ES6 number to string conversion which should match most JSON generators.
+	// Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585
+	const bitSize = 64
+	b := make([]byte, 0, 32)
+	if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
+		b = strconv.AppendFloat(b, f64, 'e', -1, bitSize)
+		// clean up e-09 to e-9
+		n := len(b)
+		if n >= 4 && string(b[n-4:n-1]) == "e-0" {
+			b = append(b[:n-2], b[n-1])
+		}
+	} else {
+		b = strconv.AppendFloat(b, f64, 'f', -1, bitSize)
+	}
+
+	// add decimal point and trailing zero if needed
+	if bytes.IndexByte(b, '.') < 0 {
+		if i := bytes.IndexByte(b, 'e'); i < 0 {
+			b = append(b, '.', '0')
+		} else {
+			// Insert ".0" just before the exponent.
+			b = append(b[:i+2], b[i:]...)
+			b[i] = '.'
+			b[i+1] = '0'
+		}
+	}
+
+	di.w.WriteString(string(b))
+
+	if di.dm.floatPrecisionIndicator {
+		switch ai {
+		case additionalInformationAsFloat16:
+			di.w.WriteString("_1")
+			return nil
+
+		case additionalInformationAsFloat32:
+			di.w.WriteString("_2")
+			return nil
+
+		case additionalInformationAsFloat64:
+			di.w.WriteString("_3")
+			return nil
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/doc.go b/vendor/github.com/fxamacker/cbor/v2/doc.go
new file mode 100644
index 0000000000..c758b73748
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/doc.go
@@ -0,0 +1,152 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+/*
+Package cbor is a modern CBOR codec (RFC 8949 & RFC 8742) with CBOR tags,
+Go struct tag options (toarray/keyasint/omitempty/omitzero), Core Deterministic Encoding,
+CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection.
+
+Encoding options allow "preferred serialization" by encoding integers and floats
+to their smallest forms (e.g. float16) when values fit.
+
+Struct tag options "keyasint", "toarray", "omitempty", and "omitzero" reduce encoding size
+and reduce programming effort.
+
+For example, "toarray" tag makes struct fields encode to CBOR array elements. And
+"keyasint" makes a field encode to an element of CBOR map with specified int key.
+
+Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go
+
+# Basics
+
+The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start
+
+Function signatures identical to encoding/json include:
+
+ Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode
+
+Standard interfaces include:
+
+ BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler
+
+Diagnostic functions translate CBOR data item into Diagnostic Notation:
+
+ Diagnose, DiagnoseFirst
+
+Functions that simplify using CBOR Sequences (RFC 8742) include:
+
+ UnmarshalFirst
+
+Custom encoding and decoding is possible by implementing standard interfaces for
+user-defined Go types.
+
+Codec functions are available at package-level (using defaults options) or by
+creating modes from options at runtime.
+
+"Mode" in this API means definite way of encoding (EncMode) or decoding (DecMode).
+
+EncMode and DecMode interfaces are created from EncOptions or DecOptions structs.
+
+ em, err := cbor.EncOptions{...}.EncMode()
+ em, err := cbor.CanonicalEncOptions().EncMode()
+ em, err := cbor.CTAP2EncOptions().EncMode()
+
+Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of
+modes won't accidentally change at runtime after they're created.
+
+Modes are intended to be reused and are safe for concurrent use.
+
+EncMode and DecMode Interfaces
+
+ // EncMode interface uses immutable options and is safe for concurrent use.
+ type EncMode interface {
+ Marshal(v interface{}) ([]byte, error)
+ NewEncoder(w io.Writer) *Encoder
+ EncOptions() EncOptions // returns copy of options
+ }
+
+ // DecMode interface uses immutable options and is safe for concurrent use.
+ type DecMode interface {
+ Unmarshal(data []byte, v interface{}) error
+ NewDecoder(r io.Reader) *Decoder
+ DecOptions() DecOptions // returns copy of options
+ }
+
+Using Default Encoding Mode
+
+ b, err := cbor.Marshal(v)
+
+ encoder := cbor.NewEncoder(w)
+ err = encoder.Encode(v)
+
+Using Default Decoding Mode
+
+ err := cbor.Unmarshal(b, &v)
+
+ decoder := cbor.NewDecoder(r)
+ err = decoder.Decode(&v)
+
+Using Default Mode of UnmarshalFirst to Decode CBOR Sequences
+
+ // Decode the first CBOR data item and return remaining bytes:
+ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
+
+Using Extended Diagnostic Notation (EDN) to represent CBOR data
+
+ // Translate the first CBOR data item into text and return remaining bytes.
+ text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to text
+
+Creating and Using Encoding Modes
+
+ // Create EncOptions using either struct literal or a function.
+ opts := cbor.CanonicalEncOptions()
+
+ // If needed, modify encoding options
+ opts.Time = cbor.TimeUnix
+
+ // Create reusable EncMode interface with immutable options, safe for concurrent use.
+ em, err := opts.EncMode()
+
+ // Use EncMode like encoding/json, with same function signatures.
+ b, err := em.Marshal(v)
+ // or
+ encoder := em.NewEncoder(w)
+ err := encoder.Encode(v)
+
+ // NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options
+ // specified during creation of em (encoding mode).
+
+# CBOR Options
+
+Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options
+
+Encoding Options: https://github.com/fxamacker/cbor#encoding-options
+
+Decoding Options: https://github.com/fxamacker/cbor#decoding-options
+
+# Struct Tags
+
+Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected.
+If both struct tags are specified then `cbor` is used.
+
+Struct tag options like "keyasint", "toarray", "omitempty", and "omitzero" make it easy to use
+very compact formats like COSE and CWT (CBOR Web Tokens) with structs.
+
+The "omitzero" option omits zero values from encoding, matching
+[stdlib encoding/json behavior](https://pkg.go.dev/encoding/json#Marshal).
+When specified in the `cbor` tag, the option is always honored.
+When specified in the `json` tag, the option is honored when building with Go 1.24+.
+
+For example, "toarray" makes struct fields encode to array elements. And "keyasint"
+makes struct fields encode to elements of CBOR map with int keys.
+
+https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png
+
+Struct tag options are listed at https://github.com/fxamacker/cbor#struct-tags-1
+
+# Tests and Fuzzing
+
+Over 375 tests are included in this package. Cover-guided fuzzing is handled by
+a private fuzzer that replaced fxamacker/cbor-fuzz years ago.
+*/
+package cbor
diff --git a/vendor/github.com/fxamacker/cbor/v2/encode.go b/vendor/github.com/fxamacker/cbor/v2/encode.go
new file mode 100644
index 0000000000..c550617c38
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/encode.go
@@ -0,0 +1,2299 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "math/rand"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/x448/float16"
+)
+
+// Marshal returns the CBOR encoding of v using default encoding options.
+// See EncOptions for encoding options.
+//
+// Marshal uses the following encoding rules:
+//
+// If value implements the Marshaler interface, Marshal calls its
+// MarshalCBOR method.
+//
+// If value implements encoding.BinaryMarshaler, Marshal calls its
+// MarshalBinary method and encodes the result as a CBOR byte string.
+//
+// Boolean values encode as CBOR booleans (type 7).
+//
+// Positive integer values encode as CBOR positive integers (type 0).
+//
+// Negative integer values encode as CBOR negative integers (type 1).
+//
+// Floating point values encode as CBOR floating points (type 7).
+//
+// String values encode as CBOR text strings (type 3).
+//
+// []byte values encode as CBOR byte strings (type 2).
+//
+// Array and slice values encode as CBOR arrays (type 4).
+//
+// Map values encode as CBOR maps (type 5).
+//
+// Struct values encode as CBOR maps (type 5). Each exported struct field
+// becomes a pair with field name encoded as CBOR text string (type 3) and
+// field value encoded based on its type. See struct tag option "keyasint"
+// to encode field name as CBOR integer (type 0 and 1). Also see struct
+// tag option "toarray" for special field "_" to encode struct values as
+// CBOR array (type 4).
+//
+// Marshal supports format string stored under the "cbor" key in the struct
+// field's tag. CBOR format string can specify the name of the field,
+// "omitempty", "omitzero" and "keyasint" options, and special case "-" for
+// field omission. If "cbor" key is absent, Marshal uses "json" key.
+// When using the "json" key, the "omitzero" option is honored when building
+// with Go 1.24+ to match stdlib encoding/json behavior.
+//
+// Struct field name is treated as integer if it has "keyasint" option in
+// its format string. The format string must specify an integer as its
+// field name.
+//
+// Special struct field "_" is used to specify struct level options, such as
+// "toarray". "toarray" option enables Go struct to be encoded as CBOR array.
+// "omitempty" and "omitzero" are disabled by "toarray" to ensure that the
+// same number of elements are encoded every time.
+//
+// Anonymous struct fields are marshaled as if their exported fields
+// were fields in the outer struct. Marshal follows the same struct fields
+// visibility rules used by JSON encoding package.
+//
+// time.Time values encode as text strings specified in RFC3339 or numerical
+// representation of seconds since January 1, 1970 UTC depending on
+// EncOptions.Time setting. Also See EncOptions.TimeTag to encode
+// time.Time as CBOR tag with tag number 0 or 1.
+//
+// big.Int values encode as CBOR integers (type 0 and 1) if values fit.
+// Otherwise, big.Int values encode as CBOR bignums (tag 2 and 3). See
+// EncOptions.BigIntConvert to always encode big.Int values as CBOR
+// bignums.
+//
+// Pointer values encode as the value pointed to.
+//
+// Interface values encode as the value stored in the interface.
+//
+// Nil slice/map/pointer/interface values encode as CBOR nulls (type 7).
+//
+// Values of other types cannot be encoded in CBOR. Attempting
+// to encode such a value causes Marshal to return an UnsupportedTypeError.
+func Marshal(v any) ([]byte, error) {
+ return defaultEncMode.Marshal(v)
+}
+
+// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool)
+// and uses default encoding options.
+//
+// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain
+// partially encoded data if error is returned.
+//
+// See Marshal for more details.
+func MarshalToBuffer(v any, buf *bytes.Buffer) error {
+ return defaultEncMode.MarshalToBuffer(v, buf)
+}
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid CBOR.
+type Marshaler interface {
+ MarshalCBOR() ([]byte, error)
+}
+
+// MarshalerError represents error from checking encoded CBOR data item
+// returned from MarshalCBOR for well-formedness and some very limited tag validation.
+type MarshalerError struct {
+ typ reflect.Type
+ err error
+}
+
+func (e *MarshalerError) Error() string {
+ return "cbor: error calling MarshalCBOR for type " +
+ e.typ.String() +
+ ": " + e.err.Error()
+}
+
+func (e *MarshalerError) Unwrap() error {
+ return e.err
+}
+
+// TranscodeError is returned when transcoding a value between CBOR and
+// another encoding fails. It records the Go type involved and the names of
+// the source and target formats.
+type TranscodeError struct {
+	err                        error
+	rtype                      reflect.Type
+	sourceFormat, targetFormat string
+}
+
+// Error implements the error interface.
+func (e TranscodeError) Error() string {
+	return "cbor: cannot transcode from " + e.sourceFormat + " to " + e.targetFormat + ": " + e.err.Error()
+}
+
+// Unwrap returns the underlying error for use with errors.Is and errors.As.
+func (e TranscodeError) Unwrap() error {
+	return e.err
+}
+
+// UnsupportedTypeError is returned by Marshal when attempting to encode value
+// of an unsupported type.
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "cbor: unsupported type: " + e.Type.String()
+}
+
+// UnsupportedValueError is returned by Marshal when attempting to encode an
+// unsupported value.
+type UnsupportedValueError struct {
+ msg string
+}
+
+func (e *UnsupportedValueError) Error() string {
+ return "cbor: unsupported value: " + e.msg
+}
+
+// SortMode identifies supported sorting order.
+type SortMode int
+
+const (
+ // SortNone encodes map pairs and struct fields in an arbitrary order.
+ SortNone SortMode = 0
+
+ // SortLengthFirst causes map keys or struct fields to be sorted such that:
+ // - If two keys have different lengths, the shorter one sorts earlier;
+ // - If two keys have the same length, the one with the lower value in
+ // (byte-wise) lexical order sorts earlier.
+ // It is used in "Canonical CBOR" encoding in RFC 7049 3.9.
+ SortLengthFirst SortMode = 1
+
+ // SortBytewiseLexical causes map keys or struct fields to be sorted in the
+ // bytewise lexicographic order of their deterministic CBOR encodings.
+ // It is used in "CTAP2 Canonical CBOR" and "Core Deterministic Encoding"
+ // in RFC 7049bis.
+ SortBytewiseLexical SortMode = 2
+
+	// SortFastShuffle encodes map pairs and struct fields in a shuffled
+ // order. This mode does not guarantee an unbiased permutation, but it
+ // does guarantee that the runtime of the shuffle algorithm used will be
+ // constant.
+ SortFastShuffle SortMode = 3
+
+ // SortCanonical is used in "Canonical CBOR" encoding in RFC 7049 3.9.
+ SortCanonical SortMode = SortLengthFirst
+
+ // SortCTAP2 is used in "CTAP2 Canonical CBOR".
+ SortCTAP2 SortMode = SortBytewiseLexical
+
+ // SortCoreDeterministic is used in "Core Deterministic Encoding" in RFC 7049bis.
+ SortCoreDeterministic SortMode = SortBytewiseLexical
+
+ maxSortMode SortMode = 4
+)
+
+// valid reports whether sm is within the range of defined SortMode values.
+func (sm SortMode) valid() bool {
+	return sm >= 0 && sm < maxSortMode
+}
+
+// StringMode specifies how to encode Go string values.
+type StringMode int
+
+const (
+ // StringToTextString encodes Go string to CBOR text string (major type 3).
+ StringToTextString StringMode = iota
+
+ // StringToByteString encodes Go string to CBOR byte string (major type 2).
+ StringToByteString
+)
+
+// cborType maps st to the CBOR major type used when encoding Go strings,
+// or returns an error for an unrecognized StringMode.
+func (st StringMode) cborType() (cborType, error) {
+	switch st {
+	case StringToTextString:
+		return cborTypeTextString, nil
+
+	case StringToByteString:
+		return cborTypeByteString, nil
+	}
+	return 0, errors.New("cbor: invalid StringType " + strconv.Itoa(int(st)))
+}
+
+// ShortestFloatMode specifies which floating-point format should
+// be used as the shortest possible format for CBOR encoding.
+// It is not used for encoding Infinity and NaN values.
+type ShortestFloatMode int
+
+const (
+ // ShortestFloatNone makes float values encode without any conversion.
+ // This is the default for ShortestFloatMode in v1.
+ // E.g. a float32 in Go will encode to CBOR float32. And
+ // a float64 in Go will encode to CBOR float64.
+ ShortestFloatNone ShortestFloatMode = iota
+
+ // ShortestFloat16 specifies float16 as the shortest form that preserves value.
+ // E.g. if float64 can convert to float32 while preserving value, then
+ // encoding will also try to convert float32 to float16. So a float64 might
+ // encode as CBOR float64, float32 or float16 depending on the value.
+ ShortestFloat16
+
+ // maxShortestFloat is an exclusive upper bound used only for validation.
+ maxShortestFloat
+)
+
+// valid reports whether sfm is one of the defined ShortestFloatMode values.
+func (sfm ShortestFloatMode) valid() bool {
+ return sfm >= 0 && sfm < maxShortestFloat
+}
+
+// NaNConvertMode specifies how to encode NaN and overrides ShortestFloatMode.
+// ShortestFloatMode is not used for encoding Infinity and NaN values.
+type NaNConvertMode int
+
+const (
+ // NaNConvert7e00 always encodes NaN to 0xf97e00 (CBOR float16 = 0x7e00).
+ NaNConvert7e00 NaNConvertMode = iota
+
+ // NaNConvertNone never modifies or converts NaN to other representations
+ // (float64 NaN stays float64, etc. even if it can use float16 without losing
+ // any bits).
+ NaNConvertNone
+
+ // NaNConvertPreserveSignal converts NaN to the smallest form that preserves
+ // value (quiet bit + payload) as described in RFC 7049bis Draft 12.
+ NaNConvertPreserveSignal
+
+ // NaNConvertQuiet always forces quiet bit = 1 and shortest form that preserves
+ // NaN payload.
+ NaNConvertQuiet
+
+ // NaNConvertReject returns UnsupportedValueError on attempts to encode a NaN value.
+ NaNConvertReject
+
+ // maxNaNConvert is an exclusive upper bound used only for validation.
+ maxNaNConvert
+)
+
+// valid reports whether ncm is one of the defined NaNConvertMode values.
+func (ncm NaNConvertMode) valid() bool {
+ return ncm >= 0 && ncm < maxNaNConvert
+}
+
+// InfConvertMode specifies how to encode Infinity and overrides ShortestFloatMode.
+// ShortestFloatMode is not used for encoding Infinity and NaN values.
+type InfConvertMode int
+
+const (
+ // InfConvertFloat16 always converts Inf to lossless IEEE binary16 (float16).
+ InfConvertFloat16 InfConvertMode = iota
+
+ // InfConvertNone never converts (used by CTAP2 Canonical CBOR).
+ InfConvertNone
+
+ // InfConvertReject returns UnsupportedValueError on attempts to encode an infinite value.
+ InfConvertReject
+
+ // maxInfConvert is an exclusive upper bound used only for validation.
+ maxInfConvert
+)
+
+// valid reports whether icm is one of the defined InfConvertMode values.
+func (icm InfConvertMode) valid() bool {
+ return icm >= 0 && icm < maxInfConvert
+}
+
+// TimeMode specifies how to encode time.Time values in compliance with RFC 8949 (CBOR):
+// - Section 3.4.1: Standard Date/Time String
+// - Section 3.4.2: Epoch-Based Date/Time
+// For more info, see:
+// - https://www.rfc-editor.org/rfc/rfc8949.html
+// NOTE: User applications that prefer to encode time with fractional seconds to an integer
+// (instead of floating point or text string) can use a CBOR tag number not assigned by IANA:
+// 1. Define a user-defined type in Go with just a time.Time or int64 as its data.
+// 2. Implement the cbor.Marshaler and cbor.Unmarshaler interface for that user-defined type
+// to encode or decode the tagged data item with an enclosed integer content.
+type TimeMode int
+
+const (
+ // TimeUnix causes time.Time to encode to a CBOR time (tag 1) with an integer content
+ // representing seconds elapsed (with 1-second precision) since UNIX Epoch UTC.
+ // The TimeUnix option is location independent and has a clear precision guarantee.
+ TimeUnix TimeMode = iota
+
+ // TimeUnixMicro causes time.Time to encode to a CBOR time (tag 1) with a floating point content
+ // representing seconds elapsed (with up to 1-microsecond precision) since UNIX Epoch UTC.
+ // NOTE: The floating point content is encoded to the shortest floating-point encoding that preserves
+ // the 64-bit floating point value. I.e., the floating point encoding can be IEEE 754:
+ // binary64, binary32, or binary16 depending on the content's value.
+ TimeUnixMicro
+
+ // TimeUnixDynamic causes time.Time to encode to a CBOR time (tag 1) with either an integer content or
+ // a floating point content, depending on the content's value. This option is equivalent to dynamically
+ // choosing TimeUnix if time.Time doesn't have fractional seconds, and using TimeUnixMicro if time.Time
+ // has fractional seconds.
+ TimeUnixDynamic
+
+ // TimeRFC3339 causes time.Time to encode to a CBOR time (tag 0) with a text string content
+ // representing the time using 1-second precision in RFC3339 format. If the time.Time has a
+ // non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339.
+ // NOTE: User applications can avoid including the RFC3339 numeric offset by:
+ // - providing a time.Time value set to UTC, or
+ // - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339.
+ TimeRFC3339
+
+ // TimeRFC3339Nano causes time.Time to encode to a CBOR time (tag 0) with a text string content
+ // representing the time using 1-nanosecond precision in RFC3339 format. If the time.Time has a
+ // non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339.
+ // NOTE: User applications can avoid including the RFC3339 numeric offset by:
+ // - providing a time.Time value set to UTC, or
+ // - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339Nano.
+ TimeRFC3339Nano
+
+ // maxTimeMode is an exclusive upper bound used only for validation.
+ maxTimeMode
+)
+
+// valid reports whether tm is one of the defined TimeMode values.
+func (tm TimeMode) valid() bool {
+ return tm >= 0 && tm < maxTimeMode
+}
+
+// BigIntConvertMode specifies how to encode big.Int values.
+type BigIntConvertMode int
+
+const (
+ // BigIntConvertShortest makes big.Int encode to CBOR integer if value fits.
+ // E.g. if big.Int value can be converted to CBOR integer while preserving
+ // value, encoder will encode it to CBOR integer (major type 0 or 1).
+ BigIntConvertShortest BigIntConvertMode = iota
+
+ // BigIntConvertNone makes big.Int encode to CBOR bignum (tag 2 or 3) without
+ // converting it to another CBOR type.
+ BigIntConvertNone
+
+ // BigIntConvertReject returns an UnsupportedTypeError instead of marshaling a big.Int.
+ BigIntConvertReject
+
+ // maxBigIntConvert is an exclusive upper bound used only for validation.
+ maxBigIntConvert
+)
+
+// valid reports whether bim is one of the defined BigIntConvertMode values.
+func (bim BigIntConvertMode) valid() bool {
+ return bim >= 0 && bim < maxBigIntConvert
+}
+
+// NilContainersMode specifies how to encode nil slices and maps.
+type NilContainersMode int
+
+const (
+ // NilContainerAsNull encodes nil slices and maps as CBOR null.
+ // This is the default.
+ NilContainerAsNull NilContainersMode = iota
+
+ // NilContainerAsEmpty encodes nil slices and maps as
+ // empty container (CBOR bytestring, array, or map).
+ NilContainerAsEmpty
+
+ // maxNilContainersMode is an exclusive upper bound used only for validation.
+ maxNilContainersMode
+)
+
+// valid reports whether m is one of the defined NilContainersMode values.
+func (m NilContainersMode) valid() bool {
+ return m >= 0 && m < maxNilContainersMode
+}
+
+// OmitEmptyMode specifies how to encode struct fields with omitempty tag.
+// The default behavior omits if field value would encode as empty CBOR value.
+type OmitEmptyMode int
+
+const (
+ // OmitEmptyCBORValue specifies that struct fields tagged with "omitempty"
+ // should be omitted from encoding if the field would be encoded as an empty
+ // CBOR value, such as CBOR false, 0, 0.0, nil, empty byte, empty string,
+ // empty array, or empty map.
+ OmitEmptyCBORValue OmitEmptyMode = iota
+
+ // OmitEmptyGoValue specifies that struct fields tagged with "omitempty"
+ // should be omitted from encoding if the field has an empty Go value,
+ // defined as false, 0, 0.0, a nil pointer, a nil interface value, and
+ // any empty array, slice, map, or string.
+ // This behavior is the same as the current (aka v1) encoding/json package
+ // included in Go.
+ OmitEmptyGoValue
+
+ // maxOmitEmptyMode is an exclusive upper bound used only for validation.
+ maxOmitEmptyMode
+)
+
+// valid reports whether om is one of the defined OmitEmptyMode values.
+func (om OmitEmptyMode) valid() bool {
+ return om >= 0 && om < maxOmitEmptyMode
+}
+
+// FieldNameMode specifies the CBOR type to use when encoding struct field names.
+type FieldNameMode int
+
+const (
+ // FieldNameToTextString encodes struct fields to CBOR text string (major type 3).
+ FieldNameToTextString FieldNameMode = iota
+
+ // FieldNameToByteString encodes struct fields to CBOR byte string (major type 2).
+ FieldNameToByteString
+
+ // maxFieldNameMode is an exclusive upper bound used only for validation.
+ maxFieldNameMode
+)
+
+// valid reports whether fnm is one of the defined FieldNameMode values.
+func (fnm FieldNameMode) valid() bool {
+ return fnm >= 0 && fnm < maxFieldNameMode
+}
+
+// ByteSliceLaterFormatMode specifies which later format conversion hint (CBOR tag 21-23)
+// to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will
+// always encode unmodified bytes from the byte slice and just wrap it within
+// CBOR tag 21, 22, or 23 if specified.
+// See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2.
+type ByteSliceLaterFormatMode int
+
+const (
+ // ByteSliceLaterFormatNone encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+ // without adding CBOR tag 21, 22, or 23.
+ ByteSliceLaterFormatNone ByteSliceLaterFormatMode = iota
+
+ // ByteSliceLaterFormatBase64URL encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+ // inside CBOR tag 21 (expected later conversion to base64url encoding, see RFC 8949 Section 3.4.5.2).
+ ByteSliceLaterFormatBase64URL
+
+ // ByteSliceLaterFormatBase64 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+ // inside CBOR tag 22 (expected later conversion to base64 encoding, see RFC 8949 Section 3.4.5.2).
+ ByteSliceLaterFormatBase64
+
+ // ByteSliceLaterFormatBase16 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+ // inside CBOR tag 23 (expected later conversion to base16 encoding, see RFC 8949 Section 3.4.5.2).
+ ByteSliceLaterFormatBase16
+)
+
+// encodingTag returns the CBOR tag number (21-23) that bsefm selects, 0 when no
+// tag should be added, or an error if bsefm is not a defined value. This mode
+// intentionally has no valid() method; validation happens here instead.
+func (bsefm ByteSliceLaterFormatMode) encodingTag() (uint64, error) {
+ switch bsefm {
+ case ByteSliceLaterFormatNone:
+ return 0, nil
+
+ case ByteSliceLaterFormatBase64URL:
+ return tagNumExpectedLaterEncodingBase64URL, nil
+
+ case ByteSliceLaterFormatBase64:
+ return tagNumExpectedLaterEncodingBase64, nil
+
+ case ByteSliceLaterFormatBase16:
+ return tagNumExpectedLaterEncodingBase16, nil
+ }
+ return 0, errors.New("cbor: invalid ByteSliceLaterFormat " + strconv.Itoa(int(bsefm)))
+}
+
+// ByteArrayMode specifies how to encode byte arrays.
+type ByteArrayMode int
+
+const (
+ // ByteArrayToByteSlice encodes byte arrays the same way that a byte slice with identical
+ // length and contents is encoded.
+ ByteArrayToByteSlice ByteArrayMode = iota
+
+ // ByteArrayToArray encodes byte arrays to the CBOR array type with one unsigned integer
+ // item for each byte in the array.
+ ByteArrayToArray
+
+ // maxByteArrayMode is an exclusive upper bound used only for validation.
+ maxByteArrayMode
+)
+
+// valid reports whether bam is one of the defined ByteArrayMode values.
+func (bam ByteArrayMode) valid() bool {
+ return bam >= 0 && bam < maxByteArrayMode
+}
+
+// BinaryMarshalerMode specifies how to encode types that implement encoding.BinaryMarshaler.
+type BinaryMarshalerMode int
+
+const (
+ // BinaryMarshalerByteString encodes the output of MarshalBinary to a CBOR byte string.
+ BinaryMarshalerByteString BinaryMarshalerMode = iota
+
+ // BinaryMarshalerNone does not recognize BinaryMarshaler implementations during encode.
+ BinaryMarshalerNone
+
+ // maxBinaryMarshalerMode is an exclusive upper bound used only for validation.
+ maxBinaryMarshalerMode
+)
+
+// valid reports whether bmm is one of the defined BinaryMarshalerMode values.
+func (bmm BinaryMarshalerMode) valid() bool {
+ return bmm >= 0 && bmm < maxBinaryMarshalerMode
+}
+
+// TextMarshalerMode specifies how to encode types that implement encoding.TextMarshaler.
+type TextMarshalerMode int
+
+const (
+ // TextMarshalerNone does not recognize TextMarshaler implementations during encode.
+ // This is the default behavior.
+ TextMarshalerNone TextMarshalerMode = iota
+
+ // TextMarshalerTextString encodes the output of MarshalText to a CBOR text string.
+ TextMarshalerTextString
+
+ // maxTextMarshalerMode is an exclusive upper bound used only for validation.
+ maxTextMarshalerMode
+)
+
+// valid reports whether tmm is one of the defined TextMarshalerMode values.
+func (tmm TextMarshalerMode) valid() bool {
+ return tmm >= 0 && tmm < maxTextMarshalerMode
+}
+
+// EncOptions specifies encoding options.
+// Zero value gives the default options; validation happens in encMode().
+type EncOptions struct {
+ // Sort specifies sorting order.
+ Sort SortMode
+
+ // ShortestFloat specifies the shortest floating-point encoding that preserves
+ // the value being encoded.
+ ShortestFloat ShortestFloatMode
+
+ // NaNConvert specifies how to encode NaN and it overrides ShortestFloatMode.
+ NaNConvert NaNConvertMode
+
+ // InfConvert specifies how to encode Inf and it overrides ShortestFloatMode.
+ InfConvert InfConvertMode
+
+ // BigIntConvert specifies how to encode big.Int values.
+ BigIntConvert BigIntConvertMode
+
+ // Time specifies how to encode time.Time.
+ Time TimeMode
+
+ // TimeTag allows time.Time to be encoded with a tag number.
+ // RFC3339 format gets tag number 0, and numeric epoch time tag number 1.
+ TimeTag EncTagMode
+
+ // IndefLength specifies whether to allow indefinite length CBOR items.
+ IndefLength IndefLengthMode
+
+ // NilContainers specifies how to encode nil slices and maps.
+ NilContainers NilContainersMode
+
+ // TagsMd specifies whether to allow CBOR tags (major type 6).
+ TagsMd TagsMode
+
+ // OmitEmpty specifies how to encode struct fields with omitempty tag.
+ OmitEmpty OmitEmptyMode
+
+ // String specifies which CBOR type to use when encoding Go strings.
+ // - CBOR text string (major type 3) is default
+ // - CBOR byte string (major type 2)
+ String StringMode
+
+ // FieldName specifies the CBOR type to use when encoding struct field names.
+ FieldName FieldNameMode
+
+ // ByteSliceLaterFormat specifies which later format conversion hint (CBOR tag 21-23)
+ // to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will
+ // always encode unmodified bytes from the byte slice and just wrap it within
+ // CBOR tag 21, 22, or 23 if specified.
+ // See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2.
+ ByteSliceLaterFormat ByteSliceLaterFormatMode
+
+ // ByteArray specifies how to encode byte arrays.
+ ByteArray ByteArrayMode
+
+ // BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler.
+ BinaryMarshaler BinaryMarshalerMode
+
+ // TextMarshaler specifies how to encode types that implement encoding.TextMarshaler.
+ TextMarshaler TextMarshalerMode
+
+ // JSONMarshalerTranscoder sets the transcoding scheme used to marshal types that implement
+ // json.Marshaler but do not also implement cbor.Marshaler. If nil, encoding behavior is not
+ // influenced by whether or not a type implements json.Marshaler.
+ JSONMarshalerTranscoder Transcoder
+}
+
+// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding,
+// defined in RFC 7049 Section 3.9 with the following rules:
+//
+// 1. "Integers must be as small as possible."
+// 2. "The expression of lengths in major types 2 through 5 must be as short as possible."
+// 3. The keys in every map must be sorted in length-first sorting order.
+// See SortLengthFirst for details.
+// 4. "Indefinite-length items must be made into definite-length items."
+// 5. "If a protocol allows for IEEE floats, then additional canonicalization rules might
+// need to be added. One example rule might be to have all floats start as a 64-bit
+// float, then do a test conversion to a 32-bit float; if the result is the same numeric
+// value, use the shorter value and repeat the process with a test conversion to a
+// 16-bit float. (This rule selects 16-bit float for positive and negative Infinity
+// as well.) Also, there are many representations for NaN. If NaN is an allowed value,
+// it must always be represented as 0xf97e00."
+func CanonicalEncOptions() EncOptions {
+ return EncOptions{
+ Sort: SortCanonical,
+ ShortestFloat: ShortestFloat16,
+ NaNConvert: NaNConvert7e00,
+ InfConvert: InfConvertFloat16,
+ IndefLength: IndefLengthForbidden,
+ }
+}
+
+// CTAP2EncOptions returns EncOptions for "CTAP2 Canonical CBOR" encoding,
+// defined in CTAP specification, with the following rules:
+//
+// 1. "Integers must be encoded as small as possible."
+// 2. "The representations of any floating-point values are not changed."
+// 3. "The expression of lengths in major types 2 through 5 must be as short as possible."
+// 4. "Indefinite-length items must be made into definite-length items."
+// 5. The keys in every map must be sorted in bytewise lexicographic order.
+// See SortBytewiseLexical for details.
+// 6. "Tags as defined in Section 2.4 in [RFC7049] MUST NOT be present."
+func CTAP2EncOptions() EncOptions {
+ return EncOptions{
+ Sort: SortCTAP2,
+ ShortestFloat: ShortestFloatNone,
+ NaNConvert: NaNConvertNone,
+ InfConvert: InfConvertNone,
+ IndefLength: IndefLengthForbidden,
+ TagsMd: TagsForbidden,
+ }
+}
+
+// CoreDetEncOptions returns EncOptions for "Core Deterministic" encoding,
+// defined in RFC 7049bis with the following rules:
+//
+// 1. "Preferred serialization MUST be used. In particular, this means that arguments
+// (see Section 3) for integers, lengths in major types 2 through 5, and tags MUST
+// be as short as possible"
+// "Floating point values also MUST use the shortest form that preserves the value"
+// 2. "Indefinite-length items MUST NOT appear."
+// 3. "The keys in every map MUST be sorted in the bytewise lexicographic order of
+// their deterministic encodings."
+func CoreDetEncOptions() EncOptions {
+ return EncOptions{
+ Sort: SortCoreDeterministic,
+ ShortestFloat: ShortestFloat16,
+ NaNConvert: NaNConvert7e00,
+ InfConvert: InfConvertFloat16,
+ IndefLength: IndefLengthForbidden,
+ }
+}
+
+// PreferredUnsortedEncOptions returns EncOptions for "Preferred Serialization" encoding,
+// defined in RFC 7049bis with the following rules:
+//
+// 1. "The preferred serialization always uses the shortest form of representing the argument
+// (Section 3);"
+// 2. "it also uses the shortest floating-point encoding that preserves the value being
+// encoded (see Section 5.5)."
+// "The preferred encoding for a floating-point value is the shortest floating-point encoding
+// that preserves its value, e.g., 0xf94580 for the number 5.5, and 0xfa45ad9c00 for the
+// number 5555.5, unless the CBOR-based protocol specifically excludes the use of the shorter
+// floating-point encodings. For NaN values, a shorter encoding is preferred if zero-padding
+// the shorter significand towards the right reconstitutes the original NaN value (for many
+// applications, the single NaN encoding 0xf97e00 will suffice)."
+// 3. "Definite length encoding is preferred whenever the length is known at the time the
+// serialization of the item starts."
+func PreferredUnsortedEncOptions() EncOptions {
+ return EncOptions{
+ Sort: SortNone,
+ ShortestFloat: ShortestFloat16,
+ NaNConvert: NaNConvert7e00,
+ InfConvert: InfConvertFloat16,
+ }
+}
+
+// EncMode returns EncMode with immutable options and no tags (safe for concurrency).
+// Both EncMode and UserBufferEncMode are backed by the same *encMode value.
+func (opts EncOptions) EncMode() (EncMode, error) { //nolint:gocritic // ignore hugeParam
+ return opts.encMode()
+}
+
+// UserBufferEncMode returns UserBufferEncMode with immutable options and no tags (safe for concurrency).
+func (opts EncOptions) UserBufferEncMode() (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam
+ return opts.encMode()
+}
+
+// EncModeWithTags returns EncMode with options and tags that are both immutable (safe for concurrency).
+func (opts EncOptions) EncModeWithTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam
+ return opts.UserBufferEncModeWithTags(tags)
+}
+
+// UserBufferEncModeWithTags returns UserBufferEncMode with options and tags that are both immutable (safe for concurrency).
+// Tags are copied out of the provided TagSet (under its read lock) so later
+// mutations of tags do not affect the returned mode.
+func (opts EncOptions) UserBufferEncModeWithTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam
+ if opts.TagsMd == TagsForbidden {
+ return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden")
+ }
+ if tags == nil {
+ return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet")
+ }
+ em, err := opts.encMode()
+ if err != nil {
+ return nil, err
+ }
+ // Copy tags, keeping only those that are enabled for encoding.
+ ts := tagSet(make(map[reflect.Type]*tagItem))
+ syncTags := tags.(*syncTagSet)
+ syncTags.RLock()
+ for contentType, tag := range syncTags.t {
+ if tag.opts.EncTag != EncTagNone {
+ ts[contentType] = tag
+ }
+ }
+ syncTags.RUnlock()
+ if len(ts) > 0 {
+ em.tags = ts
+ }
+ return em, nil
+}
+
+// EncModeWithSharedTags returns EncMode with immutable options and mutable shared tags (safe for concurrency).
+func (opts EncOptions) EncModeWithSharedTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam
+ return opts.UserBufferEncModeWithSharedTags(tags)
+}
+
+// UserBufferEncModeWithSharedTags returns UserBufferEncMode with immutable options and mutable shared tags (safe for concurrency).
+// Unlike UserBufferEncModeWithTags, the TagSet is stored by reference, so
+// later changes to tags are visible to the returned mode.
+func (opts EncOptions) UserBufferEncModeWithSharedTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam
+ if opts.TagsMd == TagsForbidden {
+ return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden")
+ }
+ if tags == nil {
+ return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet")
+ }
+ em, err := opts.encMode()
+ if err != nil {
+ return nil, err
+ }
+ em.tags = tags
+ return em, nil
+}
+
+// encMode validates every option in opts and, if all are valid, returns an
+// immutable *encMode capturing them. It is the single validation point shared
+// by all exported EncMode/UserBufferEncMode constructors.
+func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore hugeParam
+ if !opts.Sort.valid() {
+ return nil, errors.New("cbor: invalid SortMode " + strconv.Itoa(int(opts.Sort)))
+ }
+ if !opts.ShortestFloat.valid() {
+ return nil, errors.New("cbor: invalid ShortestFloatMode " + strconv.Itoa(int(opts.ShortestFloat)))
+ }
+ if !opts.NaNConvert.valid() {
+ return nil, errors.New("cbor: invalid NaNConvertMode " + strconv.Itoa(int(opts.NaNConvert)))
+ }
+ if !opts.InfConvert.valid() {
+ return nil, errors.New("cbor: invalid InfConvertMode " + strconv.Itoa(int(opts.InfConvert)))
+ }
+ if !opts.BigIntConvert.valid() {
+ return nil, errors.New("cbor: invalid BigIntConvertMode " + strconv.Itoa(int(opts.BigIntConvert)))
+ }
+ if !opts.Time.valid() {
+ return nil, errors.New("cbor: invalid TimeMode " + strconv.Itoa(int(opts.Time)))
+ }
+ if !opts.TimeTag.valid() {
+ return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag)))
+ }
+ if !opts.IndefLength.valid() {
+ return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength)))
+ }
+ if !opts.NilContainers.valid() {
+ return nil, errors.New("cbor: invalid NilContainers " + strconv.Itoa(int(opts.NilContainers)))
+ }
+ if !opts.TagsMd.valid() {
+ return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd)))
+ }
+ // TimeTag == EncTagRequired needs tag support, so it conflicts with TagsForbidden.
+ if opts.TagsMd == TagsForbidden && opts.TimeTag == EncTagRequired {
+ return nil, errors.New("cbor: cannot set TagsMd to TagsForbidden when TimeTag is EncTagRequired")
+ }
+ if !opts.OmitEmpty.valid() {
+ return nil, errors.New("cbor: invalid OmitEmpty " + strconv.Itoa(int(opts.OmitEmpty)))
+ }
+ // String and ByteSliceLaterFormat validate by resolving to their derived
+ // values (CBOR major type and tag number) instead of a valid() check.
+ stringMajorType, err := opts.String.cborType()
+ if err != nil {
+ return nil, err
+ }
+ if !opts.FieldName.valid() {
+ return nil, errors.New("cbor: invalid FieldName " + strconv.Itoa(int(opts.FieldName)))
+ }
+ byteSliceLaterEncodingTag, err := opts.ByteSliceLaterFormat.encodingTag()
+ if err != nil {
+ return nil, err
+ }
+ if !opts.ByteArray.valid() {
+ return nil, errors.New("cbor: invalid ByteArray " + strconv.Itoa(int(opts.ByteArray)))
+ }
+ if !opts.BinaryMarshaler.valid() {
+ return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler)))
+ }
+ if !opts.TextMarshaler.valid() {
+ return nil, errors.New("cbor: invalid TextMarshaler " + strconv.Itoa(int(opts.TextMarshaler)))
+ }
+ em := encMode{
+ sort: opts.Sort,
+ shortestFloat: opts.ShortestFloat,
+ nanConvert: opts.NaNConvert,
+ infConvert: opts.InfConvert,
+ bigIntConvert: opts.BigIntConvert,
+ time: opts.Time,
+ timeTag: opts.TimeTag,
+ indefLength: opts.IndefLength,
+ nilContainers: opts.NilContainers,
+ tagsMd: opts.TagsMd,
+ omitEmpty: opts.OmitEmpty,
+ stringType: opts.String,
+ stringMajorType: stringMajorType,
+ fieldName: opts.FieldName,
+ byteSliceLaterFormat: opts.ByteSliceLaterFormat,
+ byteSliceLaterEncodingTag: byteSliceLaterEncodingTag,
+ byteArray: opts.ByteArray,
+ binaryMarshaler: opts.BinaryMarshaler,
+ textMarshaler: opts.TextMarshaler,
+ jsonMarshalerTranscoder: opts.JSONMarshalerTranscoder,
+ }
+ return &em, nil
+}
+
+// EncMode is the main interface for CBOR encoding.
+type EncMode interface {
+ Marshal(v any) ([]byte, error)
+ NewEncoder(w io.Writer) *Encoder
+ EncOptions() EncOptions
+}
+
+// UserBufferEncMode is an interface for CBOR encoding, which extends EncMode by
+// adding MarshalToBuffer to support user specified buffer rather than encoding
+// into the built-in buffer pool.
+type UserBufferEncMode interface {
+ EncMode
+ MarshalToBuffer(v any, buf *bytes.Buffer) error
+
+ // This private method is to prevent users implementing
+ // this interface and so future additions to it will
+ // not be breaking changes.
+ // See https://go.dev/blog/module-compatibility
+ unexport()
+}
+
+// encMode is the concrete, immutable implementation of EncMode and
+// UserBufferEncMode. Fields mirror EncOptions plus derived values
+// (stringMajorType, byteSliceLaterEncodingTag) computed during validation.
+type encMode struct {
+ tags tagProvider
+ sort SortMode
+ shortestFloat ShortestFloatMode
+ nanConvert NaNConvertMode
+ infConvert InfConvertMode
+ bigIntConvert BigIntConvertMode
+ time TimeMode
+ timeTag EncTagMode
+ indefLength IndefLengthMode
+ nilContainers NilContainersMode
+ tagsMd TagsMode
+ omitEmpty OmitEmptyMode
+ stringType StringMode
+ stringMajorType cborType
+ fieldName FieldNameMode
+ byteSliceLaterFormat ByteSliceLaterFormatMode
+ byteSliceLaterEncodingTag uint64
+ byteArray ByteArrayMode
+ binaryMarshaler BinaryMarshalerMode
+ textMarshaler TextMarshalerMode
+ jsonMarshalerTranscoder Transcoder
+}
+
+// defaultEncMode is the mode used by package-level Marshal; zero-value
+// EncOptions always validates, so the error is ignored.
+var defaultEncMode, _ = EncOptions{}.encMode()
+
+// These four decoding modes are used by getMarshalerDecMode.
+// maxNestedLevels, maxArrayElements, and maxMapPairs are
+// set to max allowed limits to avoid rejecting Marshaler
+// output that would have been the allowable output of a
+// non-Marshaler object that exceeds default limits.
+var (
+ marshalerForbidIndefLengthForbidTagsDecMode = decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: IndefLengthForbidden,
+ tagsMd: TagsForbidden,
+ }
+
+ marshalerAllowIndefLengthForbidTagsDecMode = decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: IndefLengthAllowed,
+ tagsMd: TagsForbidden,
+ }
+
+ marshalerForbidIndefLengthAllowTagsDecMode = decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: IndefLengthForbidden,
+ tagsMd: TagsAllowed,
+ }
+
+ marshalerAllowIndefLengthAllowTagsDecMode = decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: IndefLengthAllowed,
+ tagsMd: TagsAllowed,
+ }
+)
+
+// getMarshalerDecMode returns one of four existing decoding modes
+// which can be reused (safe for parallel use) for the purpose of
+// checking if data returned by Marshaler is well-formed.
+func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode {
+ switch {
+ case indefLength == IndefLengthAllowed && tagsMd == TagsAllowed:
+ return &marshalerAllowIndefLengthAllowTagsDecMode
+
+ case indefLength == IndefLengthAllowed && tagsMd == TagsForbidden:
+ return &marshalerAllowIndefLengthForbidTagsDecMode
+
+ case indefLength == IndefLengthForbidden && tagsMd == TagsAllowed:
+ return &marshalerForbidIndefLengthAllowTagsDecMode
+
+ case indefLength == IndefLengthForbidden && tagsMd == TagsForbidden:
+ return &marshalerForbidIndefLengthForbidTagsDecMode
+
+ default:
+ // This should never happen, unless we add new options to
+ // IndefLengthMode or TagsMode without updating this function.
+ return &decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: indefLength,
+ tagsMd: tagsMd,
+ }
+ }
+}
+
+// EncOptions returns user specified options used to create this EncMode.
+func (em *encMode) EncOptions() EncOptions {
+ return EncOptions{
+ Sort: em.sort,
+ ShortestFloat: em.shortestFloat,
+ NaNConvert: em.nanConvert,
+ InfConvert: em.infConvert,
+ BigIntConvert: em.bigIntConvert,
+ Time: em.time,
+ TimeTag: em.timeTag,
+ IndefLength: em.indefLength,
+ NilContainers: em.nilContainers,
+ TagsMd: em.tagsMd,
+ OmitEmpty: em.omitEmpty,
+ String: em.stringType,
+ FieldName: em.fieldName,
+ ByteSliceLaterFormat: em.byteSliceLaterFormat,
+ ByteArray: em.byteArray,
+ BinaryMarshaler: em.binaryMarshaler,
+ TextMarshaler: em.textMarshaler,
+ JSONMarshalerTranscoder: em.jsonMarshalerTranscoder,
+ }
+}
+
+// unexport satisfies the private method of UserBufferEncMode, preventing
+// implementations outside this package.
+func (em *encMode) unexport() {}
+
+// encTagBytes returns the pre-encoded CBOR tag bytes registered for type t,
+// or nil when no tag set is configured or t has no registered tag.
+func (em *encMode) encTagBytes(t reflect.Type) []byte {
+ if em.tags != nil {
+ if tagItem := em.tags.getTagItemFromType(t); tagItem != nil {
+ return tagItem.cborTagNum
+ }
+ }
+ return nil
+}
+
+// Marshal returns the CBOR encoding of v using em encoding mode.
+//
+// See the documentation for Marshal for details.
+func (em *encMode) Marshal(v any) ([]byte, error) {
+ e := getEncodeBuffer()
+
+ if err := encode(e, em, reflect.ValueOf(v)); err != nil {
+ putEncodeBuffer(e)
+ return nil, err
+ }
+
+ // Copy the result out before returning the pooled buffer, so the
+ // returned slice does not alias pool-owned memory.
+ buf := make([]byte, e.Len())
+ copy(buf, e.Bytes())
+
+ putEncodeBuffer(e)
+ return buf, nil
+}
+
+// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool)
+// and uses em encoding mode.
+//
+// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain
+// partially encoded data if error is returned.
+//
+// See Marshal for more details.
+func (em *encMode) MarshalToBuffer(v any, buf *bytes.Buffer) error {
+ if buf == nil {
+ return fmt.Errorf("cbor: encoding buffer provided by user is nil")
+ }
+ return encode(buf, em, reflect.ValueOf(v))
+}
+
+// NewEncoder returns a new encoder that writes to w using em EncMode.
+func (em *encMode) NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{w: w, em: em}
+}
+
+// encodeBufferPool caches unused bytes.Buffer objects for later reuse.
+var encodeBufferPool = sync.Pool{
+ New: func() any {
+ e := new(bytes.Buffer)
+ e.Grow(32) // TODO: make this configurable
+ return e
+ },
+}
+
+// getEncodeBuffer fetches a buffer from the pool; pair with putEncodeBuffer.
+func getEncodeBuffer() *bytes.Buffer {
+ return encodeBufferPool.Get().(*bytes.Buffer)
+}
+
+// putEncodeBuffer resets e (keeping its capacity) and returns it to the pool.
+func putEncodeBuffer(e *bytes.Buffer) {
+ e.Reset()
+ encodeBufferPool.Put(e)
+}
+
+// encodeFunc encodes one reflect.Value into e using mode em.
+type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error
+
+// isEmptyFunc reports whether v would be treated as empty for omitempty.
+type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error)
+
+// isZeroFunc reports whether v is the zero value of its type.
+type isZeroFunc func(v reflect.Value) (zero bool, err error)
+
+// encode dispatches v to the encode function registered for its type,
+// writing the CBOR encoding to e. An invalid (zero) reflect.Value encodes
+// as CBOR null.
+func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if !v.IsValid() {
+ // v is zero value
+ e.Write(cborNil)
+ return nil
+ }
+ vt := v.Type()
+ f, _, _ := getEncodeFunc(vt)
+ if f == nil {
+ return &UnsupportedTypeError{vt}
+ }
+
+ return f(e, em, v)
+}
+
+// encodeBool writes v as CBOR true/false, preceded by a registered tag
+// for v's type if any.
+func encodeBool(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ b := cborFalse
+ if v.Bool() {
+ b = cborTrue
+ }
+ e.Write(b)
+ return nil
+}
+
+// encodeInt writes a signed integer as CBOR major type 0 (non-negative)
+// or major type 1 (negative), preceded by a registered tag for v's type if any.
+func encodeInt(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ i := v.Int()
+ if i >= 0 {
+ encodeHead(e, byte(cborTypePositiveInt), uint64(i))
+ return nil
+ }
+ // CBOR major type 1 stores -1-n, so the argument for negative i is -i-1.
+ i = i*(-1) - 1
+ encodeHead(e, byte(cborTypeNegativeInt), uint64(i))
+ return nil
+}
+
+// encodeUint writes an unsigned integer as CBOR major type 0,
+// preceded by a registered tag for v's type if any.
+func encodeUint(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ encodeHead(e, byte(cborTypePositiveInt), v.Uint())
+ return nil
+}
+
+// encodeFloat writes a floating-point value as CBOR float64, float32, or
+// float16 (major type 7), choosing the shortest lossless form permitted by
+// em.shortestFloat. NaN and Inf are delegated to encodeNaN/encodeInf, which
+// apply their own conversion modes.
+func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ f64 := v.Float()
+ if math.IsNaN(f64) {
+ return encodeNaN(e, em, v)
+ }
+ if math.IsInf(f64, 0) {
+ return encodeInf(e, em, v)
+ }
+ fopt := em.shortestFloat
+ if v.Kind() == reflect.Float64 && (fopt == ShortestFloatNone || cannotFitFloat32(f64)) {
+ // Encode float64
+ // Don't use encodeFloat64() because it cannot be inlined.
+ const argumentSize = 8
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | byte(additionalInformationAsFloat64)
+ binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64))
+ e.Write(scratch[:])
+ return nil
+ }
+
+ f32 := float32(f64)
+ if fopt == ShortestFloat16 {
+ var f16 float16.Float16
+ p := float16.PrecisionFromfloat32(f32)
+ if p == float16.PrecisionExact {
+ // Roundtrip float32->float16->float32 test isn't needed.
+ f16 = float16.Fromfloat32(f32)
+ } else if p == float16.PrecisionUnknown {
+ // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16.
+ f16 = float16.Fromfloat32(f32)
+ if f16.Float32() == f32 {
+ p = float16.PrecisionExact
+ }
+ }
+ if p == float16.PrecisionExact {
+ // Encode float16
+ // Don't use encodeFloat16() because it cannot be inlined.
+ const argumentSize = 2
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16
+ binary.BigEndian.PutUint16(scratch[1:], uint16(f16))
+ e.Write(scratch[:])
+ return nil
+ }
+ }
+
+ // Encode float32
+ // Don't use encodeFloat32() because it cannot be inlined.
+ const argumentSize = 4
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32
+ binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32))
+ e.Write(scratch[:])
+ return nil
+}
+
+// encodeInf writes a floating-point infinity according to em.infConvert:
+// reject with an error, collapse to the canonical float16 ±Inf encoding,
+// or encode at the value's own width.
+func encodeInf(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+	f64 := v.Float()
+	switch em.infConvert {
+	case InfConvertReject:
+		return &UnsupportedValueError{msg: "floating-point infinity"}
+
+	case InfConvertFloat16:
+		if f64 > 0 {
+			e.Write(cborPositiveInfinity)
+		} else {
+			e.Write(cborNegativeInfinity)
+		}
+		return nil
+	}
+	// InfConvertNone: preserve the source width.
+	if v.Kind() == reflect.Float64 {
+		return encodeFloat64(e, f64)
+	}
+	return encodeFloat32(e, float32(f64))
+}
+
+// encodeNaN writes a floating-point NaN according to em.nanConvert:
+// the canonical 0xf97e00 encoding, the unmodified source bits, rejection,
+// or the shortest encoding that preserves the NaN payload (optionally
+// forcing the quiet bit on for NaNConvertQuiet).
+func encodeNaN(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+	switch em.nanConvert {
+	case NaNConvert7e00:
+		e.Write(cborNaN)
+		return nil
+
+	case NaNConvertNone:
+		if v.Kind() == reflect.Float64 {
+			return encodeFloat64(e, v.Float())
+		}
+		f32 := float32NaNFromReflectValue(v)
+		return encodeFloat32(e, f32)
+
+	case NaNConvertReject:
+		return &UnsupportedValueError{msg: "floating-point NaN"}
+
+	default: // NaNConvertPreserveSignal, NaNConvertQuiet
+		if v.Kind() == reflect.Float64 {
+			f64 := v.Float()
+			f64bits := math.Float64bits(f64)
+			if em.nanConvert == NaNConvertQuiet && f64bits&(1<<51) == 0 {
+				f64bits |= 1 << 51 // Set quiet bit = 1
+				f64 = math.Float64frombits(f64bits)
+			}
+			// The lower 29 bits are dropped when converting from float64 to float32.
+			if f64bits&0x1fffffff != 0 {
+				// Encode NaN as float64 because dropped coef bits from float64 to float32 are not all 0s.
+				return encodeFloat64(e, f64)
+			}
+			// Create float32 from float64 manually because float32(f64) always turns on NaN's quiet bits.
+			sign := uint32(f64bits>>32) & (1 << 31)
+			exp := uint32(0x7f800000)
+			coef := uint32((f64bits & 0xfffffffffffff) >> 29)
+			f32bits := sign | exp | coef
+			f32 := math.Float32frombits(f32bits)
+			// The lower 13 bits are dropped when converting from float32 to float16.
+			if f32bits&0x1fff != 0 {
+				// Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s.
+				return encodeFloat32(e, f32)
+			}
+			// Encode NaN as float16
+			f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN.
+			return encodeFloat16(e, f16)
+		}
+
+		// Source value is float32.
+		f32 := float32NaNFromReflectValue(v)
+		f32bits := math.Float32bits(f32)
+		if em.nanConvert == NaNConvertQuiet && f32bits&(1<<22) == 0 {
+			f32bits |= 1 << 22 // Set quiet bit = 1
+			f32 = math.Float32frombits(f32bits)
+		}
+		// The lower 13 bits are dropped coef bits when converting from float32 to float16.
+		if f32bits&0x1fff != 0 {
+			// Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s.
+			return encodeFloat32(e, f32)
+		}
+		f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN.
+		return encodeFloat16(e, f16)
+	}
+}
+
+// encodeFloat16 writes f16 as a 3-byte CBOR half-precision float
+// (major type 7, additional information 25).
+func encodeFloat16(e *bytes.Buffer, f16 float16.Float16) error {
+	const argumentSize = 2
+	const headSize = 1 + argumentSize
+	var scratch [headSize]byte
+	scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16
+	binary.BigEndian.PutUint16(scratch[1:], uint16(f16))
+	e.Write(scratch[:])
+	return nil
+}
+
+// encodeFloat32 writes f32 as a 5-byte CBOR single-precision float
+// (major type 7, additional information 26).
+func encodeFloat32(e *bytes.Buffer, f32 float32) error {
+	const argumentSize = 4
+	const headSize = 1 + argumentSize
+	var scratch [headSize]byte
+	scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32
+	binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32))
+	e.Write(scratch[:])
+	return nil
+}
+
+// encodeFloat64 writes f64 as a 9-byte CBOR double-precision float
+// (major type 7, additional information 27).
+func encodeFloat64(e *bytes.Buffer, f64 float64) error {
+	const argumentSize = 8
+	const headSize = 1 + argumentSize
+	var scratch [headSize]byte
+	scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat64
+	binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64))
+	e.Write(scratch[:])
+	return nil
+}
+
+// encodeByteString writes a []byte or byte array as a CBOR byte string
+// (major type 2). A nil slice may encode as CBOR null depending on
+// em.nilContainers, and an expected-later-encoding tag may be prepended
+// for byte slices when configured.
+func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+	vk := v.Kind()
+	if vk == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull {
+		e.Write(cborNil)
+		return nil
+	}
+	if vk == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && em.byteSliceLaterEncodingTag != 0 {
+		encodeHead(e, byte(cborTypeTag), em.byteSliceLaterEncodingTag)
+	}
+	if b := em.encTagBytes(v.Type()); b != nil {
+		e.Write(b)
+	}
+	slen := v.Len()
+	if slen == 0 {
+		// Empty byte string is a single head byte with argument 0.
+		return e.WriteByte(byte(cborTypeByteString))
+	}
+	encodeHead(e, byte(cborTypeByteString), uint64(slen))
+	if vk == reflect.Array {
+		// Arrays are not addressable via v.Bytes(); copy element by element.
+		for i := 0; i < slen; i++ {
+			e.WriteByte(byte(v.Index(i).Uint()))
+		}
+		return nil
+	}
+	e.Write(v.Bytes())
+	return nil
+}
+
+// encodeString writes a Go string using the mode's configured string major
+// type (text string or byte string), preceded by any registered tag.
+func encodeString(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+	if b := em.encTagBytes(v.Type()); b != nil {
+		e.Write(b)
+	}
+	s := v.String()
+	encodeHead(e, byte(em.stringMajorType), uint64(len(s)))
+	e.WriteString(s)
+	return nil
+}
+
+// arrayEncodeFunc encodes a Go slice or array as a CBOR array, using f to
+// encode each element.
+type arrayEncodeFunc struct {
+	f encodeFunc
+}
+
+// encode writes v as a CBOR array (major type 4). Byte arrays may instead be
+// encoded as byte strings when em.byteArray is ByteArrayToByteSlice, and nil
+// slices may encode as CBOR null depending on em.nilContainers.
+func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+	if em.byteArray == ByteArrayToByteSlice && v.Type().Elem().Kind() == reflect.Uint8 {
+		return encodeByteString(e, em, v)
+	}
+	if v.Kind() == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull {
+		e.Write(cborNil)
+		return nil
+	}
+	if b := em.encTagBytes(v.Type()); b != nil {
+		e.Write(b)
+	}
+	alen := v.Len()
+	if alen == 0 {
+		// Empty array is a single head byte with argument 0.
+		return e.WriteByte(byte(cborTypeArray))
+	}
+	encodeHead(e, byte(cborTypeArray), uint64(alen))
+	for i := 0; i < alen; i++ {
+		if err := ae.f(e, em, v.Index(i)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// encodeKeyValueFunc encodes key/value pairs in map (v).
+// If kvs is provided (having the same length as v), length of encoded key and value are stored in kvs.
+// kvs is used for canonical encoding of map.
+type encodeKeyValueFunc func(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error
+
+// mapEncodeFunc encodes a Go map as a CBOR map, delegating the per-pair
+// encoding (and offset bookkeeping for sorted modes) to e.
+type mapEncodeFunc struct {
+	e encodeKeyValueFunc
+}
+
+// encode writes v as a CBOR map (major type 5). For sorted modes with more
+// than one entry, the pairs are first encoded in map iteration order while
+// recording their offsets, then rearranged in place into sorted order.
+func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+	if v.IsNil() && em.nilContainers == NilContainerAsNull {
+		e.Write(cborNil)
+		return nil
+	}
+	if b := em.encTagBytes(v.Type()); b != nil {
+		e.Write(b)
+	}
+	mlen := v.Len()
+	if mlen == 0 {
+		// Empty map is a single head byte with argument 0.
+		return e.WriteByte(byte(cborTypeMap))
+	}
+
+	encodeHead(e, byte(cborTypeMap), uint64(mlen))
+	if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 {
+		// No sorting needed; encode pairs without offset tracking.
+		return me.e(e, em, v, nil)
+	}
+
+	kvsp := getKeyValues(v.Len()) // for sorting keys
+	defer putKeyValues(kvsp)
+	kvs := *kvsp
+
+	kvBeginOffset := e.Len()
+	if err := me.e(e, em, v, kvs); err != nil {
+		return err
+	}
+	kvTotalLen := e.Len() - kvBeginOffset
+
+	// Use the capacity at the tail of the encode buffer as a staging area to rearrange the
+	// encoded pairs into sorted order.
+	e.Grow(kvTotalLen)
+	tmp := e.Bytes()[e.Len() : e.Len()+kvTotalLen] // Can use e.AvailableBuffer() in Go 1.21+.
+	dst := e.Bytes()[kvBeginOffset:]
+
+	if em.sort == SortBytewiseLexical {
+		sort.Sort(&bytewiseKeyValueSorter{kvs: kvs, data: dst})
+	} else {
+		sort.Sort(&lengthFirstKeyValueSorter{kvs: kvs, data: dst})
+	}
+
+	// This is where the encoded bytes are actually rearranged in the output buffer to reflect
+	// the desired order.
+	sortedOffset := 0
+	for _, kv := range kvs {
+		copy(tmp[sortedOffset:], dst[kv.offset:kv.nextOffset])
+		sortedOffset += kv.nextOffset - kv.offset
+	}
+	copy(dst, tmp[:kvTotalLen])
+
+	return nil
+
+}
+
+// keyValue is the position of an encoded pair in a buffer. All offsets are zero-based and relative
+// to the first byte of the first encoded pair. The key occupies [offset, valueOffset)
+// and the value occupies [valueOffset, nextOffset).
+type keyValue struct {
+	offset      int
+	valueOffset int
+	nextOffset  int
+}
+
+// bytewiseKeyValueSorter implements sort.Interface, ordering encoded pairs by
+// bytewise-lexicographic comparison of their encoded keys (RFC 8949 core
+// deterministic encoding order).
+type bytewiseKeyValueSorter struct {
+	kvs  []keyValue
+	data []byte
+}
+
+func (x *bytewiseKeyValueSorter) Len() int {
+	return len(x.kvs)
+}
+
+func (x *bytewiseKeyValueSorter) Swap(i, j int) {
+	x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i]
+}
+
+func (x *bytewiseKeyValueSorter) Less(i, j int) bool {
+	kvi, kvj := x.kvs[i], x.kvs[j]
+	return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0
+}
+
+// lengthFirstKeyValueSorter implements sort.Interface, ordering encoded pairs
+// by encoded key length first, then bytewise (RFC 7049 canonical order).
+type lengthFirstKeyValueSorter struct {
+	kvs  []keyValue
+	data []byte
+}
+
+func (x *lengthFirstKeyValueSorter) Len() int {
+	return len(x.kvs)
+}
+
+func (x *lengthFirstKeyValueSorter) Swap(i, j int) {
+	x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i]
+}
+
+func (x *lengthFirstKeyValueSorter) Less(i, j int) bool {
+	// Shorter encoded keys sort first; equal lengths fall back to bytewise order.
+	kvi, kvj := x.kvs[i], x.kvs[j]
+	if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 {
+		return keyLengthDifference < 0
+	}
+	return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0
+}
+
+// keyValuePool recycles []keyValue scratch slices used when sorting map pairs.
+var keyValuePool = sync.Pool{}
+
+// getKeyValues returns a pooled []keyValue of the given length, allocating a
+// new one when the pool is empty or the pooled slice is too small.
+func getKeyValues(length int) *[]keyValue {
+	v := keyValuePool.Get()
+	if v == nil {
+		y := make([]keyValue, length)
+		return &y
+	}
+	x := v.(*[]keyValue)
+	if cap(*x) >= length {
+		*x = (*x)[:length]
+		return x
+	}
+	// []keyValue from the pool does not have enough capacity.
+	// Return it back to the pool and create a new one.
+	keyValuePool.Put(x)
+	y := make([]keyValue, length)
+	return &y
+}
+
+// putKeyValues resets x to zero length (keeping capacity) and returns it to the pool.
+func putKeyValues(x *[]keyValue) {
+	*x = (*x)[:0]
+	keyValuePool.Put(x)
+}
+
+// encodeStructToArray encodes a struct tagged with the "toarray" option as a
+// CBOR array whose elements are the struct's fields in declaration order.
+func encodeStructToArray(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) {
+	structType, err := getEncodingStructType(v.Type())
+	if err != nil {
+		return err
+	}
+
+	if b := em.encTagBytes(v.Type()); b != nil {
+		e.Write(b)
+	}
+
+	flds := structType.fields
+
+	encodeHead(e, byte(cborTypeArray), uint64(len(flds)))
+	for i := 0; i < len(flds); i++ {
+		f := flds[i]
+
+		var fv reflect.Value
+		if len(f.idx) == 1 {
+			fv = v.Field(f.idx[0])
+		} else {
+			// Get embedded field value. No error is expected.
+			fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) {
+				// Write CBOR nil for null pointer to embedded struct
+				e.Write(cborNil)
+				return reflect.Value{}, nil
+			})
+			if !fv.IsValid() {
+				continue
+			}
+		}
+
+		if err := f.ef(e, em, fv); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// encodeStruct encodes a struct as a CBOR map of field name (or integer key)
+// to field value, honoring omitempty/omitzero options. Because omitted fields
+// shrink the element count, the map head is written optimistically for the
+// full field count and rewritten (with the payload shifted left) if fewer
+// entries were actually encoded.
+func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) {
+	structType, err := getEncodingStructType(v.Type())
+	if err != nil {
+		return err
+	}
+
+	flds := structType.getFields(em)
+
+	start := 0
+	if em.sort == SortFastShuffle && len(flds) > 0 {
+		start = rand.Intn(len(flds)) //nolint:gosec // Don't need a CSPRNG for deck cutting.
+	}
+
+	if b := em.encTagBytes(v.Type()); b != nil {
+		e.Write(b)
+	}
+
+	// Encode head with struct field count.
+	// Head is rewritten later if actual encoded field count is different from struct field count.
+	encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds)))
+
+	kvbegin := e.Len()
+	kvcount := 0
+	for offset := 0; offset < len(flds); offset++ {
+		f := flds[(start+offset)%len(flds)]
+
+		var fv reflect.Value
+		if len(f.idx) == 1 {
+			fv = v.Field(f.idx[0])
+		} else {
+			// Get embedded field value. No error is expected.
+			fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) {
+				// Skip null pointer to embedded struct
+				return reflect.Value{}, nil
+			})
+			if !fv.IsValid() {
+				continue
+			}
+		}
+		if f.omitEmpty {
+			empty, err := f.ief(em, fv)
+			if err != nil {
+				return err
+			}
+			if empty {
+				continue
+			}
+		}
+		if f.omitZero {
+			zero, err := f.izf(fv)
+			if err != nil {
+				return err
+			}
+			if zero {
+				continue
+			}
+		}
+
+		if !f.keyAsInt && em.fieldName == FieldNameToByteString {
+			e.Write(f.cborNameByteString)
+		} else { // int or text string
+			e.Write(f.cborName)
+		}
+
+		if err := f.ef(e, em, fv); err != nil {
+			return err
+		}
+
+		kvcount++
+	}
+
+	if len(flds) == kvcount {
+		// Encoded element count in head is the same as actual element count.
+		return nil
+	}
+
+	// Overwrite the bytes that were reserved for the head before encoding the map entries.
+	var actualHeadLen int
+	{
+		headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin])
+		actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount))
+	}
+
+	if actualHeadLen == encodedHeadLen {
+		// The bytes reserved for the encoded head were exactly the right size, so the
+		// encoded entries are already in their final positions.
+		return nil
+	}
+
+	// We reserved more bytes than needed for the encoded head, based on the number of fields
+	// encoded. The encoded entries are offset to the right by the number of excess reserved
+	// bytes. Shift the entries left to remove the gap.
+	excessReservedBytes := encodedHeadLen - actualHeadLen
+	dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes]
+	src := e.Bytes()[kvbegin:e.Len()]
+	copy(dst, src)
+
+	// After shifting, the excess bytes are at the end of the output buffer and they are
+	// garbage.
+	e.Truncate(e.Len() - excessReservedBytes)
+	return nil
+}
+
+// encodeIntf encodes an interface value by encoding its dynamic value,
+// writing CBOR null for a nil interface.
+func encodeIntf(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+	if v.IsNil() {
+		e.Write(cborNil)
+		return nil
+	}
+	return encode(e, em, v.Elem())
+}
+
+// encodeTime encodes a time.Time in the mode's configured format (unix
+// seconds, fractional seconds, RFC 3339 string, etc.), optionally preceded by
+// standard tag 0 (string) or tag 1 (epoch) when em.timeTag requires it.
+// The zero time always encodes as CBOR null.
+func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+	t := v.Interface().(time.Time)
+	if t.IsZero() {
+		e.Write(cborNil) // Even if tag is required, encode as CBOR null.
+		return nil
+	}
+	if em.timeTag == EncTagRequired {
+		tagNumber := 1
+		if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano {
+			tagNumber = 0
+		}
+		encodeHead(e, byte(cborTypeTag), uint64(tagNumber))
+	}
+	switch em.time {
+	case TimeUnix:
+		secs := t.Unix()
+		return encodeInt(e, em, reflect.ValueOf(secs))
+
+	case TimeUnixMicro:
+		t = t.UTC().Round(time.Microsecond)
+		f := float64(t.UnixNano()) / 1e9
+		return encodeFloat(e, em, reflect.ValueOf(f))
+
+	case TimeUnixDynamic:
+		// Use an integer when there is no sub-second component, else a float.
+		t = t.UTC().Round(time.Microsecond)
+		secs, nsecs := t.Unix(), uint64(t.Nanosecond())
+		if nsecs == 0 {
+			return encodeInt(e, em, reflect.ValueOf(secs))
+		}
+		f := float64(secs) + float64(nsecs)/1e9
+		return encodeFloat(e, em, reflect.ValueOf(f))
+
+	case TimeRFC3339:
+		s := t.Format(time.RFC3339)
+		return encodeString(e, em, reflect.ValueOf(s))
+
+	default: // TimeRFC3339Nano
+		s := t.Format(time.RFC3339Nano)
+		return encodeString(e, em, reflect.ValueOf(s))
+	}
+}
+
+// encodeBigInt encodes a big.Int as a CBOR bignum (tag 2 for non-negative,
+// tag 3 for negative) or, with BigIntConvertShortest, as a plain integer when
+// the magnitude fits in 64 bits. BigIntConvertReject refuses big.Int entirely.
+func encodeBigInt(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+	if em.bigIntConvert == BigIntConvertReject {
+		return &UnsupportedTypeError{Type: typeBigInt}
+	}
+
+	vbi := v.Interface().(big.Int)
+	sign := vbi.Sign()
+	bi := new(big.Int).SetBytes(vbi.Bytes()) // bi is absolute value of v
+	if sign < 0 {
+		// For negative number, convert to CBOR encoded number (-v-1).
+		bi.Sub(bi, big.NewInt(1))
+	}
+
+	if em.bigIntConvert == BigIntConvertShortest {
+		if bi.IsUint64() {
+			if sign >= 0 {
+				// Encode as CBOR pos int (major type 0)
+				encodeHead(e, byte(cborTypePositiveInt), bi.Uint64())
+				return nil
+			}
+			// Encode as CBOR neg int (major type 1)
+			encodeHead(e, byte(cborTypeNegativeInt), bi.Uint64())
+			return nil
+		}
+	}
+
+	tagNum := 2
+	if sign < 0 {
+		tagNum = 3
+	}
+	// Write tag number
+	encodeHead(e, byte(cborTypeTag), uint64(tagNum))
+	// Write bignum byte string
+	b := bi.Bytes()
+	encodeHead(e, byte(cborTypeByteString), uint64(len(b)))
+	e.Write(b)
+	return nil
+}
+
+// binaryMarshalerEncoder encodes types implementing encoding.BinaryMarshaler
+// as a CBOR byte string of the marshaled bytes. When the mode disables
+// BinaryMarshaler handling, it falls back to the alternate functions.
+type binaryMarshalerEncoder struct {
+	alternateEncode  encodeFunc
+	alternateIsEmpty isEmptyFunc
+}
+
+// encode writes MarshalBinary's output as a CBOR byte string (with any
+// registered tag), or delegates to alternateEncode when disabled by the mode.
+func (bme binaryMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+	if em.binaryMarshaler != BinaryMarshalerByteString {
+		return bme.alternateEncode(e, em, v)
+	}
+
+	vt := v.Type()
+	m, ok := v.Interface().(encoding.BinaryMarshaler)
+	if !ok {
+		// Method may be declared on the pointer type; box an addressable copy.
+		pv := reflect.New(vt)
+		pv.Elem().Set(v)
+		m = pv.Interface().(encoding.BinaryMarshaler)
+	}
+	data, err := m.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if b := em.encTagBytes(vt); b != nil {
+		e.Write(b)
+	}
+	encodeHead(e, byte(cborTypeByteString), uint64(len(data)))
+	e.Write(data)
+	return nil
+}
+
+// isEmpty reports whether MarshalBinary produces zero bytes, or delegates to
+// alternateIsEmpty when BinaryMarshaler handling is disabled by the mode.
+func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) {
+	if em.binaryMarshaler != BinaryMarshalerByteString {
+		return bme.alternateIsEmpty(em, v)
+	}
+
+	m, ok := v.Interface().(encoding.BinaryMarshaler)
+	if !ok {
+		pv := reflect.New(v.Type())
+		pv.Elem().Set(v)
+		m = pv.Interface().(encoding.BinaryMarshaler)
+	}
+	data, err := m.MarshalBinary()
+	if err != nil {
+		return false, err
+	}
+	return len(data) == 0, nil
+}
+
+// textMarshalerEncoder encodes types implementing encoding.TextMarshaler as a
+// CBOR text string of the marshaled text. When the mode disables
+// TextMarshaler handling, it falls back to the alternate functions.
+type textMarshalerEncoder struct {
+	alternateEncode  encodeFunc
+	alternateIsEmpty isEmptyFunc
+}
+
+// encode writes MarshalText's output as a CBOR text string (with any
+// registered tag), or delegates to alternateEncode when disabled by the mode.
+func (tme textMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+	if em.textMarshaler == TextMarshalerNone {
+		return tme.alternateEncode(e, em, v)
+	}
+
+	vt := v.Type()
+	m, ok := v.Interface().(encoding.TextMarshaler)
+	if !ok {
+		// Method may be declared on the pointer type; box an addressable copy.
+		pv := reflect.New(vt)
+		pv.Elem().Set(v)
+		m = pv.Interface().(encoding.TextMarshaler)
+	}
+	data, err := m.MarshalText()
+	if err != nil {
+		return fmt.Errorf("cbor: cannot marshal text for %s: %w", vt, err)
+	}
+	if b := em.encTagBytes(vt); b != nil {
+		e.Write(b)
+	}
+
+	encodeHead(e, byte(cborTypeTextString), uint64(len(data)))
+	e.Write(data)
+	return nil
+}
+
+// isEmpty reports whether MarshalText produces zero bytes, or delegates to
+// alternateIsEmpty when TextMarshaler handling is disabled by the mode.
+func (tme textMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) {
+	if em.textMarshaler == TextMarshalerNone {
+		return tme.alternateIsEmpty(em, v)
+	}
+
+	m, ok := v.Interface().(encoding.TextMarshaler)
+	if !ok {
+		pv := reflect.New(v.Type())
+		pv.Elem().Set(v)
+		m = pv.Interface().(encoding.TextMarshaler)
+	}
+	data, err := m.MarshalText()
+	if err != nil {
+		return false, fmt.Errorf("cbor: cannot marshal text for %s: %w", v.Type(), err)
+	}
+	return len(data) == 0, nil
+}
+
+// jsonMarshalerEncoder encodes types implementing json.Marshaler by
+// transcoding their JSON output to CBOR via em.jsonMarshalerTranscoder.
+// When no transcoder is configured, it falls back to the alternate functions.
+type jsonMarshalerEncoder struct {
+	alternateEncode  encodeFunc
+	alternateIsEmpty isEmptyFunc
+}
+
+// encode marshals v to JSON, transcodes the JSON into the output buffer, and
+// verifies the transcoder emitted exactly one well-formed CBOR data item
+// (truncating the buffer back on failure).
+func (jme jsonMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+	if em.jsonMarshalerTranscoder == nil {
+		return jme.alternateEncode(e, em, v)
+	}
+
+	vt := v.Type()
+	m, ok := v.Interface().(jsonMarshaler)
+	if !ok {
+		// Method may be declared on the pointer type; box an addressable copy.
+		pv := reflect.New(vt)
+		pv.Elem().Set(v)
+		m = pv.Interface().(jsonMarshaler)
+	}
+
+	json, err := m.MarshalJSON()
+	if err != nil {
+		return err
+	}
+
+	offset := e.Len()
+
+	if b := em.encTagBytes(vt); b != nil {
+		e.Write(b)
+	}
+
+	if err := em.jsonMarshalerTranscoder.Transcode(e, bytes.NewReader(json)); err != nil {
+		return &TranscodeError{err: err, rtype: vt, sourceFormat: "json", targetFormat: "cbor"}
+	}
+
+	// Validate that the transcode function has written exactly one well-formed data item.
+	d := decoder{data: e.Bytes()[offset:], dm: getMarshalerDecMode(em.indefLength, em.tagsMd)}
+	if err := d.wellformed(false, true); err != nil {
+		e.Truncate(offset)
+		return &TranscodeError{err: err, rtype: vt, sourceFormat: "json", targetFormat: "cbor"}
+	}
+
+	return nil
+}
+
+// isEmpty always reports false when a transcoder is configured; otherwise it
+// delegates to alternateIsEmpty.
+func (jme jsonMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) {
+	if em.jsonMarshalerTranscoder == nil {
+		return jme.alternateIsEmpty(em, v)
+	}
+
+	// As with types implementing cbor.Marshaler, transcoded json.Marshaler values always encode
+	// as exactly one complete CBOR data item.
+	return false, nil
+}
+
+// encodeMarshalerType encodes a type implementing cbor.Marshaler by writing
+// the bytes returned from MarshalCBOR, after verifying they form exactly one
+// well-formed CBOR data item.
+func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+	if em.tagsMd == TagsForbidden && v.Type() == typeRawTag {
+		return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden")
+	}
+	m, ok := v.Interface().(Marshaler)
+	if !ok {
+		// Method may be declared on the pointer type; box an addressable copy.
+		pv := reflect.New(v.Type())
+		pv.Elem().Set(v)
+		m = pv.Interface().(Marshaler)
+	}
+	data, err := m.MarshalCBOR()
+	if err != nil {
+		return err
+	}
+
+	// Verify returned CBOR data item from MarshalCBOR() is well-formed and passes tag validity for builtin tags 0-3.
+	d := decoder{data: data, dm: getMarshalerDecMode(em.indefLength, em.tagsMd)}
+	err = d.wellformed(false, true)
+	if err != nil {
+		return &MarshalerError{typ: v.Type(), err: err}
+	}
+
+	e.Write(data)
+	return nil
+}
+
+// encodeTag encodes a cbor.Tag value: the tag number head followed by the
+// encoded tag content. An uninitialized Tag encodes as CBOR null.
+func encodeTag(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+	if em.tagsMd == TagsForbidden {
+		return errors.New("cbor: cannot encode cbor.Tag when TagsMd is TagsForbidden")
+	}
+
+	t := v.Interface().(Tag)
+
+	if t.Number == 0 && t.Content == nil {
+		// Marshal uninitialized cbor.Tag
+		e.Write(cborNil)
+		return nil
+	}
+
+	// Marshal tag number
+	encodeHead(e, byte(cborTypeTag), t.Number)
+
+	vem := *em // shallow copy
+
+	// For built-in tags, disable settings that may introduce tag validity errors when
+	// marshaling certain Content values.
+	switch t.Number {
+	case tagNumRFC3339Time:
+		// Tag 0 content must be a text string.
+		vem.stringType = StringToTextString
+		vem.stringMajorType = cborTypeTextString
+	case tagNumUnsignedBignum, tagNumNegativeBignum:
+		// Tag 2/3 content must be an untagged byte string.
+		vem.byteSliceLaterFormat = ByteSliceLaterFormatNone
+		vem.byteSliceLaterEncodingTag = 0
+	}
+
+	// Marshal tag content
+	return encode(e, &vem, reflect.ValueOf(t.Content))
+}
+
+// encodeHead writes CBOR head of specified type t and returns number of bytes written.
+// The argument n is encoded in the shortest form: embedded in the initial
+// byte, or as a 1-, 2-, 4-, or 8-byte big-endian argument.
+func encodeHead(e *bytes.Buffer, t byte, n uint64) int {
+	if n <= maxAdditionalInformationWithoutArgument {
+		// Argument fits in the low 5 bits of the initial byte.
+		const headSize = 1
+		e.WriteByte(t | byte(n))
+		return headSize
+	}
+
+	if n <= math.MaxUint8 {
+		const headSize = 2
+		scratch := [headSize]byte{
+			t | byte(additionalInformationWith1ByteArgument),
+			byte(n),
+		}
+		e.Write(scratch[:])
+		return headSize
+	}
+
+	if n <= math.MaxUint16 {
+		const headSize = 3
+		var scratch [headSize]byte
+		scratch[0] = t | byte(additionalInformationWith2ByteArgument)
+		binary.BigEndian.PutUint16(scratch[1:], uint16(n))
+		e.Write(scratch[:])
+		return headSize
+	}
+
+	if n <= math.MaxUint32 {
+		const headSize = 5
+		var scratch [headSize]byte
+		scratch[0] = t | byte(additionalInformationWith4ByteArgument)
+		binary.BigEndian.PutUint32(scratch[1:], uint32(n))
+		e.Write(scratch[:])
+		return headSize
+	}
+
+	const headSize = 9
+	var scratch [headSize]byte
+	scratch[0] = t | byte(additionalInformationWith8ByteArgument)
+	binary.BigEndian.PutUint64(scratch[1:], n)
+	e.Write(scratch[:])
+	return headSize
+}
+
+// jsonMarshaler mirrors encoding/json.Marshaler without importing it.
+type jsonMarshaler interface{ MarshalJSON() ([]byte, error) }
+
+// Interface and concrete types checked during encode-function selection.
+var (
+	typeMarshaler       = reflect.TypeOf((*Marshaler)(nil)).Elem()
+	typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
+	typeTextMarshaler   = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+	typeJSONMarshaler   = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
+	typeRawMessage      = reflect.TypeOf(RawMessage(nil))
+	typeByteString      = reflect.TypeOf(ByteString(""))
+)
+
+// getEncodeFuncInternal selects the encode, isEmpty, and isZero functions for
+// type t. Special types and marshaler interfaces are checked first; the
+// deferred wrappers layer marshaler-aware encoders over the kind-based
+// encoder chosen by the final switch so mode settings can pick either path.
+func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc, izf isZeroFunc) {
+	k := t.Kind()
+	if k == reflect.Pointer {
+		return getEncodeIndirectValueFunc(t), isEmptyPtr, getIsZeroFunc(t)
+	}
+	switch t {
+	case typeSimpleValue:
+		return encodeMarshalerType, isEmptyUint, getIsZeroFunc(t)
+
+	case typeTag:
+		return encodeTag, alwaysNotEmpty, getIsZeroFunc(t)
+
+	case typeTime:
+		return encodeTime, alwaysNotEmpty, getIsZeroFunc(t)
+
+	case typeBigInt:
+		return encodeBigInt, alwaysNotEmpty, getIsZeroFunc(t)
+
+	case typeRawMessage:
+		return encodeMarshalerType, isEmptySlice, getIsZeroFunc(t)
+
+	case typeByteString:
+		return encodeMarshalerType, isEmptyString, getIsZeroFunc(t)
+	}
+	if reflect.PointerTo(t).Implements(typeMarshaler) {
+		return encodeMarshalerType, alwaysNotEmpty, getIsZeroFunc(t)
+	}
+	if reflect.PointerTo(t).Implements(typeBinaryMarshaler) {
+		defer func() {
+			// capture encoding method used for modes that disable BinaryMarshaler
+			bme := binaryMarshalerEncoder{
+				alternateEncode:  ef,
+				alternateIsEmpty: ief,
+			}
+			ef = bme.encode
+			ief = bme.isEmpty
+		}()
+	}
+	if reflect.PointerTo(t).Implements(typeTextMarshaler) {
+		defer func() {
+			// capture encoding method used for modes that disable TextMarshaler
+			tme := textMarshalerEncoder{
+				alternateEncode:  ef,
+				alternateIsEmpty: ief,
+			}
+			ef = tme.encode
+			ief = tme.isEmpty
+		}()
+	}
+	if reflect.PointerTo(t).Implements(typeJSONMarshaler) {
+		defer func() {
+			// capture encoding method used for modes that don't support transcoding
+			// from types that implement json.Marshaler.
+			jme := jsonMarshalerEncoder{
+				alternateEncode:  ef,
+				alternateIsEmpty: ief,
+			}
+			ef = jme.encode
+			ief = jme.isEmpty
+		}()
+	}
+
+	switch k {
+	case reflect.Bool:
+		return encodeBool, isEmptyBool, getIsZeroFunc(t)
+
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return encodeInt, isEmptyInt, getIsZeroFunc(t)
+
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return encodeUint, isEmptyUint, getIsZeroFunc(t)
+
+	case reflect.Float32, reflect.Float64:
+		return encodeFloat, isEmptyFloat, getIsZeroFunc(t)
+
+	case reflect.String:
+		return encodeString, isEmptyString, getIsZeroFunc(t)
+
+	case reflect.Slice:
+		if t.Elem().Kind() == reflect.Uint8 {
+			return encodeByteString, isEmptySlice, getIsZeroFunc(t)
+		}
+		fallthrough
+
+	case reflect.Array:
+		f, _, _ := getEncodeFunc(t.Elem())
+		if f == nil {
+			// Element type is not encodable.
+			return nil, nil, nil
+		}
+		return arrayEncodeFunc{f: f}.encode, isEmptySlice, getIsZeroFunc(t)
+
+	case reflect.Map:
+		f := getEncodeMapFunc(t)
+		if f == nil {
+			return nil, nil, nil
+		}
+		return f, isEmptyMap, getIsZeroFunc(t)
+
+	case reflect.Struct:
+		// Get struct's special field "_" tag options
+		if f, ok := t.FieldByName("_"); ok {
+			tag := f.Tag.Get("cbor")
+			if tag != "-" {
+				if hasToArrayOption(tag) {
+					return encodeStructToArray, isEmptyStruct, isZeroFieldStruct
+				}
+			}
+		}
+		return encodeStruct, isEmptyStruct, getIsZeroFunc(t)
+
+	case reflect.Interface:
+		return encodeIntf, isEmptyIntf, getIsZeroFunc(t)
+	}
+	// Unsupported kind (chan, func, complex, unsafe pointer, ...).
+	return nil, nil, nil
+}
+
+// getEncodeIndirectValueFunc returns an encodeFunc for a pointer type that
+// dereferences through any chain of pointers, encoding CBOR null for nil.
+func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc {
+	for t.Kind() == reflect.Pointer {
+		t = t.Elem()
+	}
+	f, _, _ := getEncodeFunc(t)
+	if f == nil {
+		return nil
+	}
+	return func(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+		for v.Kind() == reflect.Pointer && !v.IsNil() {
+			v = v.Elem()
+		}
+		if v.Kind() == reflect.Pointer && v.IsNil() {
+			e.Write(cborNil)
+			return nil
+		}
+		return f(e, em, v)
+	}
+}
+
+// The isEmpty* helpers below implement the "omitempty" test for each kind:
+// a value is empty when it is the zero bool/number, has zero length, or is nil.
+
+// alwaysNotEmpty is used for types (time.Time, big.Int, Tag, ...) that are
+// never omitted by omitempty.
+func alwaysNotEmpty(_ *encMode, _ reflect.Value) (empty bool, err error) {
+	return false, nil
+}
+
+func isEmptyBool(_ *encMode, v reflect.Value) (bool, error) {
+	return !v.Bool(), nil
+}
+
+func isEmptyInt(_ *encMode, v reflect.Value) (bool, error) {
+	return v.Int() == 0, nil
+}
+
+func isEmptyUint(_ *encMode, v reflect.Value) (bool, error) {
+	return v.Uint() == 0, nil
+}
+
+func isEmptyFloat(_ *encMode, v reflect.Value) (bool, error) {
+	return v.Float() == 0.0, nil
+}
+
+func isEmptyString(_ *encMode, v reflect.Value) (bool, error) {
+	return v.Len() == 0, nil
+}
+
+func isEmptySlice(_ *encMode, v reflect.Value) (bool, error) {
+	return v.Len() == 0, nil
+}
+
+func isEmptyMap(_ *encMode, v reflect.Value) (bool, error) {
+	return v.Len() == 0, nil
+}
+
+func isEmptyPtr(_ *encMode, v reflect.Value) (bool, error) {
+	return v.IsNil(), nil
+}
+
+func isEmptyIntf(_ *encMode, v reflect.Value) (bool, error) {
+	return v.IsNil(), nil
+}
+
+// isEmptyStruct reports whether a struct value is empty for omitempty
+// purposes: every field must itself be omitempty and empty. Structs are never
+// empty under OmitEmptyGoValue, and a toarray struct is empty only when it
+// has no fields.
+func isEmptyStruct(em *encMode, v reflect.Value) (bool, error) {
+	structType, err := getEncodingStructType(v.Type())
+	if err != nil {
+		return false, err
+	}
+
+	if em.omitEmpty == OmitEmptyGoValue {
+		return false, nil
+	}
+
+	if structType.toArray {
+		return len(structType.fields) == 0, nil
+	}
+
+	if len(structType.fields) > len(structType.omitEmptyFieldsIdx) {
+		// At least one field lacks omitempty, so it always encodes.
+		return false, nil
+	}
+
+	for _, i := range structType.omitEmptyFieldsIdx {
+		f := structType.fields[i]
+
+		// Get field value
+		var fv reflect.Value
+		if len(f.idx) == 1 {
+			fv = v.Field(f.idx[0])
+		} else {
+			// Get embedded field value. No error is expected.
+			fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) {
+				// Skip null pointer to embedded struct
+				return reflect.Value{}, nil
+			})
+			if !fv.IsValid() {
+				continue
+			}
+		}
+
+		empty, err := f.ief(em, fv)
+		if err != nil {
+			return false, err
+		}
+		if !empty {
+			return false, nil
+		}
+	}
+	return true, nil
+}
+
+// cannotFitFloat32 reports whether f64 loses precision (or range) when
+// round-tripped through float32.
+func cannotFitFloat32(f64 float64) bool {
+	f32 := float32(f64)
+	return float64(f32) != f64
+}
+
+// float32NaNFromReflectValue extracts float32 NaN from reflect.Value while preserving NaN's quiet bit.
+func float32NaNFromReflectValue(v reflect.Value) float32 {
+	// Keith Randall's workaround for issue https://github.com/golang/go/issues/36400
+	p := reflect.New(v.Type())
+	p.Elem().Set(v)
+	f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32)
+	return f32
+}
+
+// isZeroer is implemented by types that define their own zero test,
+// matching the interface recognized by encoding/json's omitzero in Go 1.24+.
+type isZeroer interface {
+	IsZero() bool
+}
+
+var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem()
+
+// getIsZeroFunc returns a function for the given type that can be called to determine if a given value is zero.
+// Types that implement `IsZero() bool` are delegated to for non-nil values.
+// Types that do not implement `IsZero() bool` use the reflect.Value#IsZero() implementation.
+// The returned function matches behavior of stdlib encoding/json behavior in Go 1.24+.
+func getIsZeroFunc(t reflect.Type) isZeroFunc {
+	// Provide a function that uses a type's IsZero method if defined.
+	switch {
+	case t == nil:
+		return isZeroDefault
+	case t.Kind() == reflect.Interface && t.Implements(isZeroerType):
+		return isZeroInterfaceCustom
+	case t.Kind() == reflect.Pointer && t.Implements(isZeroerType):
+		return isZeroPointerCustom
+	case t.Implements(isZeroerType):
+		return isZeroCustom
+	case reflect.PointerTo(t).Implements(isZeroerType):
+		// Only the pointer type has IsZero; the value must be boxed to call it.
+		return isZeroAddrCustom
+	default:
+		return isZeroDefault
+	}
+}
+
+// isZeroInterfaceCustom returns true for nil or pointer-to-nil values,
+// and delegates to the custom IsZero() implementation otherwise.
+func isZeroInterfaceCustom(v reflect.Value) (bool, error) {
+	kind := v.Kind()
+
+	// A nil value of any nilable kind is zero without calling IsZero.
+	switch kind {
+	case reflect.Chan, reflect.Func, reflect.Map, reflect.Pointer, reflect.Interface, reflect.Slice:
+		if v.IsNil() {
+			return true, nil
+		}
+	}
+
+	// An interface or pointer wrapping a nil pointer is also zero; calling
+	// IsZero through it could panic.
+	switch kind {
+	case reflect.Interface, reflect.Pointer:
+		if elem := v.Elem(); elem.Kind() == reflect.Pointer && elem.IsNil() {
+			return true, nil
+		}
+	}
+
+	return v.Interface().(isZeroer).IsZero(), nil
+}
+
+// isZeroPointerCustom returns true for nil values,
+// and delegates to the custom IsZero() implementation otherwise.
+func isZeroPointerCustom(v reflect.Value) (bool, error) {
+	if v.IsNil() {
+		return true, nil
+	}
+	return v.Interface().(isZeroer).IsZero(), nil
+}
+
+// isZeroCustom delegates to the custom IsZero() implementation.
+func isZeroCustom(v reflect.Value) (bool, error) {
+	return v.Interface().(isZeroer).IsZero(), nil
+}
+
+// isZeroAddrCustom delegates to the custom IsZero() implementation of the addr of the value.
+func isZeroAddrCustom(v reflect.Value) (bool, error) {
+	if !v.CanAddr() {
+		// Temporarily box v so we can take the address.
+		v2 := reflect.New(v.Type()).Elem()
+		v2.Set(v)
+		v = v2
+	}
+	return v.Addr().Interface().(isZeroer).IsZero(), nil
+}
+
+// isZeroDefault calls reflect.Value#IsZero()
+func isZeroDefault(v reflect.Value) (bool, error) {
+	if !v.IsValid() {
+		// v is zero value
+		return true, nil
+	}
+	return v.IsZero(), nil
+}
+
+// isZeroFieldStruct is used to determine whether to omit toarray structs.
+// A toarray struct is considered zero only when it has no encodable fields.
+func isZeroFieldStruct(v reflect.Value) (bool, error) {
+	structType, err := getEncodingStructType(v.Type())
+	if err != nil {
+		return false, err
+	}
+	return len(structType.fields) == 0, nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map.go b/vendor/github.com/fxamacker/cbor/v2/encode_map.go
new file mode 100644
index 0000000000..2871bfdab9
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/encode_map.go
@@ -0,0 +1,92 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "reflect"
+ "sync"
+)
+
+type mapKeyValueEncodeFunc struct {
+ kf, ef encodeFunc
+ kpool, vpool sync.Pool
+}
+
+func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
+ iterk := me.kpool.Get().(*reflect.Value)
+ defer func() {
+ iterk.SetZero()
+ me.kpool.Put(iterk)
+ }()
+ iterv := me.vpool.Get().(*reflect.Value)
+ defer func() {
+ iterv.SetZero()
+ me.vpool.Put(iterv)
+ }()
+
+ if kvs == nil {
+ for i, iter := 0, v.MapRange(); iter.Next(); i++ {
+ iterk.SetIterKey(iter)
+ iterv.SetIterValue(iter)
+
+ if err := me.kf(e, em, *iterk); err != nil {
+ return err
+ }
+ if err := me.ef(e, em, *iterv); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ initial := e.Len()
+ for i, iter := 0, v.MapRange(); iter.Next(); i++ {
+ iterk.SetIterKey(iter)
+ iterv.SetIterValue(iter)
+
+ offset := e.Len()
+ if err := me.kf(e, em, *iterk); err != nil {
+ return err
+ }
+ valueOffset := e.Len()
+ if err := me.ef(e, em, *iterv); err != nil {
+ return err
+ }
+ kvs[i] = keyValue{
+ offset: offset - initial,
+ valueOffset: valueOffset - initial,
+ nextOffset: e.Len() - initial,
+ }
+ }
+
+ return nil
+}
+
+func getEncodeMapFunc(t reflect.Type) encodeFunc {
+ kf, _, _ := getEncodeFunc(t.Key())
+ ef, _, _ := getEncodeFunc(t.Elem())
+ if kf == nil || ef == nil {
+ return nil
+ }
+ mkv := &mapKeyValueEncodeFunc{
+ kf: kf,
+ ef: ef,
+ kpool: sync.Pool{
+ New: func() any {
+ rk := reflect.New(t.Key()).Elem()
+ return &rk
+ },
+ },
+ vpool: sync.Pool{
+ New: func() any {
+ rv := reflect.New(t.Elem()).Elem()
+ return &rv
+ },
+ },
+ }
+ return mapEncodeFunc{
+ e: mkv.encodeKeyValues,
+ }.encode
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go b/vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go
new file mode 100644
index 0000000000..c893a411da
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go
@@ -0,0 +1,8 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+//go:build go1.24
+
+package cbor
+
+var jsonStdlibSupportsOmitzero = true
diff --git a/vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go b/vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go
new file mode 100644
index 0000000000..db86a63217
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go
@@ -0,0 +1,8 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+//go:build !go1.24
+
+package cbor
+
+var jsonStdlibSupportsOmitzero = false
diff --git a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go
new file mode 100644
index 0000000000..30f72814f6
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go
@@ -0,0 +1,98 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// SimpleValue represents CBOR simple value.
+// CBOR simple value is:
+// - an extension point like CBOR tag.
+// - a subset of CBOR major type 7 that isn't floating-point.
+// - "identified by a number between 0 and 255, but distinct from that number itself".
+// For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key.
+//
+// CBOR simple values identified by 20..23 are: "false", "true", "null", and "undefined".
+// Other CBOR simple values are currently unassigned/reserved by IANA.
+type SimpleValue uint8
+
+var (
+ typeSimpleValue = reflect.TypeOf(SimpleValue(0))
+)
+
+// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7).
+func (sv SimpleValue) MarshalCBOR() ([]byte, error) {
+ // RFC 8949 3.3. Floating-Point Numbers and Values with No Content says:
+ // "An encoder MUST NOT issue two-byte sequences that start with 0xf8
+ // (major type 7, additional information 24) and continue with a byte
+ // less than 0x20 (32 decimal). Such sequences are not well-formed.
+ // (This implies that an encoder cannot encode false, true, null, or
+ // undefined in two-byte sequences and that only the one-byte variants
+ // of these are well-formed; more generally speaking, each simple value
+ // only has a single representation variant)."
+
+ switch {
+ case sv <= maxSimpleValueInAdditionalInformation:
+ return []byte{byte(cborTypePrimitives) | byte(sv)}, nil
+
+ case sv >= minSimpleValueIn1ByteArgument:
+ return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil
+
+ default:
+ return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)}
+ }
+}
+
+// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
+//
+// Deprecated: No longer used by this codec; kept for compatibility
+// with user apps that directly call this function.
+func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
+ if sv == nil {
+ return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ // Check well-formedness of CBOR data item.
+ // SimpleValue.UnmarshalCBOR() is exported, so
+ // the codec needs to support same behavior for:
+ // - Unmarshal(data, *SimpleValue)
+ // - SimpleValue.UnmarshalCBOR(data)
+ err := d.wellformed(false, false)
+ if err != nil {
+ return err
+ }
+
+ return sv.unmarshalCBOR(data)
+}
+
+// unmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
+// This function assumes data is well-formed, and does not perform bounds checking.
+// This function is called by Unmarshal().
+func (sv *SimpleValue) unmarshalCBOR(data []byte) error {
+ if sv == nil {
+ return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ typ, ai, val := d.getHead()
+
+ if typ != cborTypePrimitives {
+ return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"}
+ }
+ if ai > additionalInformationWith1ByteArgument {
+ return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"}
+ }
+
+ // It is safe to cast val to uint8 here because
+ // - data is already verified to be well-formed CBOR simple value and
+ // - val is <= math.MaxUint8.
+ *sv = SimpleValue(val)
+ return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/stream.go b/vendor/github.com/fxamacker/cbor/v2/stream.go
new file mode 100644
index 0000000000..7ac6d7d671
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/stream.go
@@ -0,0 +1,277 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "reflect"
+)
+
+// Decoder reads and decodes CBOR values from io.Reader.
+type Decoder struct {
+ r io.Reader
+ d decoder
+ buf []byte
+ off int // next read offset in buf
+ bytesRead int
+}
+
+// NewDecoder returns a new decoder that reads and decodes from r using
+// the default decoding options.
+func NewDecoder(r io.Reader) *Decoder {
+ return defaultDecMode.NewDecoder(r)
+}
+
+// Decode reads CBOR value and decodes it into the value pointed to by v.
+func (dec *Decoder) Decode(v any) error {
+ _, err := dec.readNext()
+ if err != nil {
+ // Return validation error or read error.
+ return err
+ }
+
+ dec.d.reset(dec.buf[dec.off:])
+ err = dec.d.value(v)
+
+ // Increment dec.off even if decoding err is not nil because
+ // dec.d.off points to the next CBOR data item if current
+ // CBOR data item is valid but failed to be decoded into v.
+ // This allows next CBOR data item to be decoded in next
+ // call to this function.
+ dec.off += dec.d.off
+ dec.bytesRead += dec.d.off
+
+ return err
+}
+
+// Skip skips to the next CBOR data item (if there is any),
+// otherwise it returns an error such as io.EOF, io.ErrUnexpectedEOF, etc.
+func (dec *Decoder) Skip() error {
+ n, err := dec.readNext()
+ if err != nil {
+ // Return validation error or read error.
+ return err
+ }
+
+ dec.off += n
+ dec.bytesRead += n
+ return nil
+}
+
+// NumBytesRead returns the number of bytes read.
+func (dec *Decoder) NumBytesRead() int {
+ return dec.bytesRead
+}
+
+// Buffered returns a reader for data remaining in Decoder's buffer.
+// Returned reader is valid until the next call to Decode or Skip.
+func (dec *Decoder) Buffered() io.Reader {
+ return bytes.NewReader(dec.buf[dec.off:])
+}
+
+// readNext() reads next CBOR data item from Reader to buffer.
+// It returns the size of next CBOR data item.
+// It also returns validation error or read error if any.
+func (dec *Decoder) readNext() (int, error) {
+ var readErr error
+ var validErr error
+
+ for {
+ // Process any unread data in dec.buf.
+ if dec.off < len(dec.buf) {
+ dec.d.reset(dec.buf[dec.off:])
+ off := dec.off // Save offset before data validation
+ validErr = dec.d.wellformed(true, false)
+ dec.off = off // Restore offset
+
+ if validErr == nil {
+ return dec.d.off, nil
+ }
+
+ if validErr != io.ErrUnexpectedEOF {
+ return 0, validErr
+ }
+
+ // Process last read error on io.ErrUnexpectedEOF.
+ if readErr != nil {
+ if readErr == io.EOF {
+ // current CBOR data item is incomplete.
+ return 0, io.ErrUnexpectedEOF
+ }
+ return 0, readErr
+ }
+ }
+
+ // More data is needed and there was no read error.
+ var n int
+ for n == 0 {
+ n, readErr = dec.read()
+ if n == 0 && readErr != nil {
+ // No more data can be read and read error is encountered.
+ // At this point, validErr is either nil or io.ErrUnexpectedEOF.
+ if readErr == io.EOF {
+ if validErr == io.ErrUnexpectedEOF {
+ // current CBOR data item is incomplete.
+ return 0, io.ErrUnexpectedEOF
+ }
+ }
+ return 0, readErr
+ }
+ }
+
+ // At this point, dec.buf contains new data from last read (n > 0).
+ }
+}
+
+// read() reads data from Reader to buffer.
+// It returns number of bytes read and any read error encountered.
+// Postconditions:
+// - dec.buf contains previously unread data and new data.
+// - dec.off is 0.
+func (dec *Decoder) read() (int, error) {
+ // Grow buf if needed.
+ const minRead = 512
+ if cap(dec.buf)-len(dec.buf)+dec.off < minRead {
+ oldUnreadBuf := dec.buf[dec.off:]
+ dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead)
+ dec.overwriteBuf(oldUnreadBuf)
+ }
+
+ // Copy unread data over read data and reset off to 0.
+ if dec.off > 0 {
+ dec.overwriteBuf(dec.buf[dec.off:])
+ }
+
+ // Read from reader and reslice buf.
+ n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
+ dec.buf = dec.buf[0 : len(dec.buf)+n]
+ return n, err
+}
+
+func (dec *Decoder) overwriteBuf(newBuf []byte) {
+ n := copy(dec.buf, newBuf)
+ dec.buf = dec.buf[:n]
+ dec.off = 0
+}
+
+// Encoder writes CBOR values to io.Writer.
+type Encoder struct {
+ w io.Writer
+ em *encMode
+ indefTypes []cborType
+}
+
+// NewEncoder returns a new encoder that writes to w using the default encoding options.
+func NewEncoder(w io.Writer) *Encoder {
+ return defaultEncMode.NewEncoder(w)
+}
+
+// Encode writes the CBOR encoding of v.
+func (enc *Encoder) Encode(v any) error {
+ if len(enc.indefTypes) > 0 && v != nil {
+ indefType := enc.indefTypes[len(enc.indefTypes)-1]
+ if indefType == cborTypeTextString {
+ k := reflect.TypeOf(v).Kind()
+ if k != reflect.String {
+ return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string")
+ }
+ } else if indefType == cborTypeByteString {
+ t := reflect.TypeOf(v)
+ k := t.Kind()
+ if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 {
+ return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string")
+ }
+ }
+ }
+
+ buf := getEncodeBuffer()
+
+ err := encode(buf, enc.em, reflect.ValueOf(v))
+ if err == nil {
+ _, err = enc.w.Write(buf.Bytes())
+ }
+
+ putEncodeBuffer(buf)
+ return err
+}
+
+// StartIndefiniteByteString starts byte string encoding of indefinite length.
+// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings
+// ("chunks") as one contiguous string until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteByteString() error {
+ return enc.startIndefinite(cborTypeByteString)
+}
+
+// StartIndefiniteTextString starts text string encoding of indefinite length.
+// Subsequent calls of (*Encoder).Encode() encodes definite length text strings
+// ("chunks") as one contiguous string until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteTextString() error {
+ return enc.startIndefinite(cborTypeTextString)
+}
+
+// StartIndefiniteArray starts array encoding of indefinite length.
+// Subsequent calls of (*Encoder).Encode() encodes elements of the array
+// until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteArray() error {
+ return enc.startIndefinite(cborTypeArray)
+}
+
+// StartIndefiniteMap starts map encoding of indefinite length.
+// Subsequent calls of (*Encoder).Encode() encodes elements of the map
+// until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteMap() error {
+ return enc.startIndefinite(cborTypeMap)
+}
+
+// EndIndefinite closes last opened indefinite length value.
+func (enc *Encoder) EndIndefinite() error {
+ if len(enc.indefTypes) == 0 {
+ return errors.New("cbor: cannot encode \"break\" code outside indefinite length values")
+ }
+ _, err := enc.w.Write([]byte{cborBreakFlag})
+ if err == nil {
+ enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1]
+ }
+ return err
+}
+
+var cborIndefHeader = map[cborType][]byte{
+ cborTypeByteString: {cborByteStringWithIndefiniteLengthHead},
+ cborTypeTextString: {cborTextStringWithIndefiniteLengthHead},
+ cborTypeArray: {cborArrayWithIndefiniteLengthHead},
+ cborTypeMap: {cborMapWithIndefiniteLengthHead},
+}
+
+func (enc *Encoder) startIndefinite(typ cborType) error {
+ if enc.em.indefLength == IndefLengthForbidden {
+ return &IndefiniteLengthError{typ}
+ }
+ _, err := enc.w.Write(cborIndefHeader[typ])
+ if err == nil {
+ enc.indefTypes = append(enc.indefTypes, typ)
+ }
+ return err
+}
+
+// RawMessage is a raw encoded CBOR value.
+type RawMessage []byte
+
+// MarshalCBOR returns m or CBOR nil if m is nil.
+func (m RawMessage) MarshalCBOR() ([]byte, error) {
+ if len(m) == 0 {
+ return cborNil, nil
+ }
+ return m, nil
+}
+
+// UnmarshalCBOR creates a copy of data and saves to *m.
+func (m *RawMessage) UnmarshalCBOR(data []byte) error {
+ if m == nil {
+ return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer")
+ }
+ *m = append((*m)[0:0], data...)
+ return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/structfields.go b/vendor/github.com/fxamacker/cbor/v2/structfields.go
new file mode 100644
index 0000000000..cf0a922cd7
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/structfields.go
@@ -0,0 +1,268 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "reflect"
+ "sort"
+ "strings"
+)
+
+type field struct {
+ name string
+ nameAsInt int64 // used to decoder to match field name with CBOR int
+ cborName []byte
+ cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3
+ idx []int
+ typ reflect.Type
+ ef encodeFunc
+ ief isEmptyFunc
+ izf isZeroFunc
+ typInfo *typeInfo // used to decoder to reuse type info
+ tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields)
+ omitEmpty bool // used to skip empty field
+ omitZero bool // used to skip zero field
+ keyAsInt bool // used to encode/decode field name as int
+}
+
+type fields []*field
+
+// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth.
+type indexFieldSorter struct {
+ fields fields
+}
+
+func (x *indexFieldSorter) Len() int {
+ return len(x.fields)
+}
+
+func (x *indexFieldSorter) Swap(i, j int) {
+ x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
+}
+
+func (x *indexFieldSorter) Less(i, j int) bool {
+ iIdx, jIdx := x.fields[i].idx, x.fields[j].idx
+ for k := 0; k < len(iIdx) && k < len(jIdx); k++ {
+ if iIdx[k] != jIdx[k] {
+ return iIdx[k] < jIdx[k]
+ }
+ }
+ return len(iIdx) <= len(jIdx)
+}
+
+// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag.
+type nameLevelAndTagFieldSorter struct {
+ fields fields
+}
+
+func (x *nameLevelAndTagFieldSorter) Len() int {
+ return len(x.fields)
+}
+
+func (x *nameLevelAndTagFieldSorter) Swap(i, j int) {
+ x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
+}
+
+func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool {
+ fi, fj := x.fields[i], x.fields[j]
+ if fi.name != fj.name {
+ return fi.name < fj.name
+ }
+ if len(fi.idx) != len(fj.idx) {
+ return len(fi.idx) < len(fj.idx)
+ }
+ if fi.tagged != fj.tagged {
+ return fi.tagged
+ }
+ return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters.
+}
+
+// getFields returns visible fields of struct type t following visibility rules for JSON encoding.
+func getFields(t reflect.Type) (flds fields, structOptions string) {
+ // Get special field "_" tag options
+ if f, ok := t.FieldByName("_"); ok {
+ tag := f.Tag.Get("cbor")
+ if tag != "-" {
+ structOptions = tag
+ }
+ }
+
+ // nTypes contains next level anonymous fields' types and indexes
+ // (there can be multiple fields of the same type at the same level)
+ flds, nTypes := appendFields(t, nil, nil, nil)
+
+ if len(nTypes) > 0 {
+
+ var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes
+ vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels
+
+ for len(nTypes) > 0 {
+ cTypes, nTypes = nTypes, nil
+
+ for t, idx := range cTypes {
+ // If there are multiple anonymous fields of the same struct type at the same level, all are ignored.
+ if len(idx) > 1 {
+ continue
+ }
+
+ // Anonymous field of the same type at deeper nested level is ignored.
+ if vTypes[t] {
+ continue
+ }
+ vTypes[t] = true
+
+ flds, nTypes = appendFields(t, idx[0], flds, nTypes)
+ }
+ }
+ }
+
+ sort.Sort(&nameLevelAndTagFieldSorter{flds})
+
+ // Keep visible fields.
+ j := 0 // index of next unique field
+ for i := 0; i < len(flds); {
+ name := flds[i].name
+ if i == len(flds)-1 || // last field
+ name != flds[i+1].name || // field i has unique field name
+ len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1
+ (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not
+ flds[j] = flds[i]
+ j++
+ }
+
+ // Skip fields with the same field name.
+ for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive
+ }
+ }
+ if j != len(flds) {
+ flds = flds[:j]
+ }
+
+ // Sort fields by field index
+ sort.Sort(&indexFieldSorter{flds})
+
+ return flds, structOptions
+}
+
+// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes.
+func appendFields(
+ t reflect.Type,
+ idx []int,
+ flds fields,
+ nTypes map[reflect.Type][][]int,
+) (
+ _flds fields,
+ _nTypes map[reflect.Type][][]int,
+) {
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ ft := f.Type
+ for ft.Kind() == reflect.Pointer {
+ ft = ft.Elem()
+ }
+
+ if !isFieldExportable(f, ft.Kind()) {
+ continue
+ }
+
+ cborTag := true
+ tag := f.Tag.Get("cbor")
+ if tag == "" {
+ tag = f.Tag.Get("json")
+ cborTag = false
+ }
+ if tag == "-" {
+ continue
+ }
+
+ tagged := tag != ""
+
+ // Parse field tag options
+ var tagFieldName string
+ var omitempty, omitzero, keyasint bool
+ for j := 0; tag != ""; j++ {
+ var token string
+ idx := strings.IndexByte(tag, ',')
+ if idx == -1 {
+ token, tag = tag, ""
+ } else {
+ token, tag = tag[:idx], tag[idx+1:]
+ }
+ if j == 0 {
+ tagFieldName = token
+ } else {
+ switch token {
+ case "omitempty":
+ omitempty = true
+ case "omitzero":
+ if cborTag || jsonStdlibSupportsOmitzero {
+ omitzero = true
+ }
+ case "keyasint":
+ keyasint = true
+ }
+ }
+ }
+
+ fieldName := tagFieldName
+ if tagFieldName == "" {
+ fieldName = f.Name
+ }
+
+ fIdx := make([]int, len(idx)+1)
+ copy(fIdx, idx)
+ fIdx[len(fIdx)-1] = i
+
+ if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" {
+ flds = append(flds, &field{
+ name: fieldName,
+ idx: fIdx,
+ typ: f.Type,
+ omitEmpty: omitempty,
+ omitZero: omitzero,
+ keyAsInt: keyasint,
+ tagged: tagged})
+ } else {
+ if nTypes == nil {
+ nTypes = make(map[reflect.Type][][]int)
+ }
+ nTypes[ft] = append(nTypes[ft], fIdx)
+ }
+ }
+
+ return flds, nTypes
+}
+
+// isFieldExportable returns true if f is an exportable (regular or anonymous) field or
+// a nonexportable anonymous field of struct type.
+// Nonexportable anonymous field of struct type can contain exportable fields.
+func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam
+ return f.IsExported() || (f.Anonymous && fk == reflect.Struct)
+}
+
+type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error)
+
+// getFieldValue returns field value of struct v by index. When encountering null pointer
+// to anonymous (embedded) struct field, f is called with the last traversed field value.
+func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) {
+ fv = v
+ for i, n := range idx {
+ fv = fv.Field(n)
+
+ if i < len(idx)-1 {
+ if fv.Kind() == reflect.Pointer && fv.Type().Elem().Kind() == reflect.Struct {
+ if fv.IsNil() {
+ // Null pointer to embedded struct field
+ fv, err = f(fv)
+ if err != nil || !fv.IsValid() {
+ return fv, err
+ }
+ }
+ fv = fv.Elem()
+ }
+ }
+ }
+ return fv, nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/tag.go b/vendor/github.com/fxamacker/cbor/v2/tag.go
new file mode 100644
index 0000000000..bd8b773f54
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/tag.go
@@ -0,0 +1,329 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+// Tag represents a tagged data item (CBOR major type 6), comprising a tag number and the unmarshaled tag content.
+// NOTE: The same encoding and decoding options that apply to untagged CBOR data items also applies to tag content
+// during encoding and decoding.
+type Tag struct {
+ Number uint64
+ Content any
+}
+
+// RawTag represents a tagged data item (CBOR major type 6), comprising a tag number and the raw tag content.
+// The raw tag content (enclosed data item) is a CBOR-encoded data item.
+// RawTag can be used to delay decoding a CBOR data item or precompute encoding a CBOR data item.
+type RawTag struct {
+ Number uint64
+ Content RawMessage
+}
+
+// UnmarshalCBOR sets *t with the tag number and the raw tag content copied from data.
+//
+// Deprecated: No longer used by this codec; kept for compatibility
+// with user apps that directly call this function.
+func (t *RawTag) UnmarshalCBOR(data []byte) error {
+ if t == nil {
+ return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ // Check if data is a well-formed CBOR data item.
+ // RawTag.UnmarshalCBOR() is exported, so
+ // the codec needs to support same behavior for:
+ // - Unmarshal(data, *RawTag)
+ // - RawTag.UnmarshalCBOR(data)
+ err := d.wellformed(false, false)
+ if err != nil {
+ return err
+ }
+
+ return t.unmarshalCBOR(data)
+}
+
+// unmarshalCBOR sets *t with the tag number and the raw tag content copied from data.
+// This function assumes data is well-formed, and does not perform bounds checking.
+// This function is called by Unmarshal().
+func (t *RawTag) unmarshalCBOR(data []byte) error {
+ if t == nil {
+ return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
+ }
+
+ // Decoding CBOR null and undefined to cbor.RawTag is no-op.
+ if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
+ return nil
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ // Unmarshal tag number.
+ typ, _, num := d.getHead()
+ if typ != cborTypeTag {
+ return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()}
+ }
+ t.Number = num
+
+ // Unmarshal tag content.
+ c := d.data[d.off:]
+ t.Content = make([]byte, len(c))
+ copy(t.Content, c)
+ return nil
+}
+
+// MarshalCBOR returns CBOR encoding of t.
+func (t RawTag) MarshalCBOR() ([]byte, error) {
+ if t.Number == 0 && len(t.Content) == 0 {
+ // Marshal uninitialized cbor.RawTag
+ b := make([]byte, len(cborNil))
+ copy(b, cborNil)
+ return b, nil
+ }
+
+ e := getEncodeBuffer()
+
+ encodeHead(e, byte(cborTypeTag), t.Number)
+
+ content := t.Content
+ if len(content) == 0 {
+ content = cborNil
+ }
+
+ buf := make([]byte, len(e.Bytes())+len(content))
+ n := copy(buf, e.Bytes())
+ copy(buf[n:], content)
+
+ putEncodeBuffer(e)
+ return buf, nil
+}
+
+// DecTagMode specifies how decoder handles tag number.
+type DecTagMode int
+
+const (
+ // DecTagIgnored makes decoder ignore tag number (skips if present).
+ DecTagIgnored DecTagMode = iota
+
+ // DecTagOptional makes decoder verify tag number if it's present.
+ DecTagOptional
+
+ // DecTagRequired makes decoder verify tag number and tag number must be present.
+ DecTagRequired
+
+ maxDecTagMode
+)
+
+func (dtm DecTagMode) valid() bool {
+ return dtm >= 0 && dtm < maxDecTagMode
+}
+
+// EncTagMode specifies how encoder handles tag number.
+type EncTagMode int
+
+const (
+ // EncTagNone makes encoder not encode tag number.
+ EncTagNone EncTagMode = iota
+
+ // EncTagRequired makes encoder encode tag number.
+ EncTagRequired
+
+ maxEncTagMode
+)
+
+func (etm EncTagMode) valid() bool {
+ return etm >= 0 && etm < maxEncTagMode
+}
+
+// TagOptions specifies how encoder and decoder handle tag number.
+type TagOptions struct {
+ DecTag DecTagMode
+ EncTag EncTagMode
+}
+
+// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode
+// to provide CBOR tag support.
+type TagSet interface {
+ // Add adds given tag number(s), content type, and tag options to TagSet.
+ Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error
+
+ // Remove removes given tag content type from TagSet.
+ Remove(contentType reflect.Type)
+
+ tagProvider
+}
+
+type tagProvider interface {
+ getTagItemFromType(t reflect.Type) *tagItem
+ getTypeFromTagNum(num []uint64) reflect.Type
+}
+
+type tagItem struct {
+ num []uint64
+ cborTagNum []byte
+ contentType reflect.Type
+ opts TagOptions
+}
+
+func (t *tagItem) equalTagNum(num []uint64) bool {
+ // Fast path to compare 1 tag number
+ if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] {
+ return true
+ }
+
+ if len(t.num) != len(num) {
+ return false
+ }
+
+ for i := 0; i < len(t.num); i++ {
+ if t.num[i] != num[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+type (
+ tagSet map[reflect.Type]*tagItem
+
+ syncTagSet struct {
+ sync.RWMutex
+ t tagSet
+ }
+)
+
+func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem {
+ return t[typ]
+}
+
+func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type {
+ for typ, tag := range t {
+ if tag.equalTagNum(num) {
+ return typ
+ }
+ }
+ return nil
+}
+
+// NewTagSet returns TagSet (safe for concurrency).
+func NewTagSet() TagSet {
+ return &syncTagSet{t: make(map[reflect.Type]*tagItem)}
+}
+
+// Add adds given tag number(s), content type, and tag options to TagSet.
+func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error {
+ if contentType == nil {
+ return errors.New("cbor: cannot add nil content type to TagSet")
+ }
+ for contentType.Kind() == reflect.Pointer {
+ contentType = contentType.Elem()
+ }
+ tag, err := newTagItem(opts, contentType, num, nestedNum...)
+ if err != nil {
+ return err
+ }
+ t.Lock()
+ defer t.Unlock()
+ for typ, ti := range t.t {
+ if typ == contentType {
+ return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet")
+ }
+ if ti.equalTagNum(tag.num) {
+ return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num)
+ }
+ }
+ t.t[contentType] = tag
+ return nil
+}
+
+// Remove removes given tag content type from TagSet.
+func (t *syncTagSet) Remove(contentType reflect.Type) {
+ for contentType.Kind() == reflect.Pointer {
+ contentType = contentType.Elem()
+ }
+ t.Lock()
+ delete(t.t, contentType)
+ t.Unlock()
+}
+
+func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem {
+ t.RLock()
+ ti := t.t[typ]
+ t.RUnlock()
+ return ti
+}
+
+func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type {
+ t.RLock()
+ rt := t.t.getTypeFromTagNum(num)
+ t.RUnlock()
+ return rt
+}
+
+func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) {
+ if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone {
+ return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet")
+ }
+ if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface {
+ return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String())
+ }
+ if contentType == typeTime {
+ return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
+ }
+ if contentType == typeBigInt {
+ return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically")
+ }
+ if contentType == typeTag {
+ return nil, errors.New("cbor: cannot add cbor.Tag to TagSet")
+ }
+ if contentType == typeRawTag {
+ return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet")
+ }
+ if num == 0 || num == 1 {
+ return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
+ }
+ if num == 2 || num == 3 {
+ return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically")
+ }
+ if num == tagNumSelfDescribedCBOR {
+ return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically")
+ }
+
+ te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType}
+ te.num = append(te.num, nestedNum...)
+
+ // Cache encoded tag numbers
+ e := getEncodeBuffer()
+ for _, n := range te.num {
+ encodeHead(e, byte(cborTypeTag), n)
+ }
+ te.cborTagNum = make([]byte, e.Len())
+ copy(te.cborTagNum, e.Bytes())
+ putEncodeBuffer(e)
+
+ return &te, nil
+}
+
+var (
+ typeTag = reflect.TypeOf(Tag{})
+ typeRawTag = reflect.TypeOf(RawTag{})
+)
+
+// WrongTagError describes mismatch between CBOR tag and registered tag.
+type WrongTagError struct {
+ RegisteredType reflect.Type
+ RegisteredTagNum []uint64
+ TagNum []uint64
+}
+
+func (e *WrongTagError) Error() string {
+ return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum)
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/valid.go b/vendor/github.com/fxamacker/cbor/v2/valid.go
new file mode 100644
index 0000000000..b40793b95e
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/valid.go
@@ -0,0 +1,394 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+ "math"
+ "strconv"
+
+ "github.com/x448/float16"
+)
+
+// SyntaxError is a description of a CBOR syntax error.
+type SyntaxError struct {
+ msg string
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// SemanticError is a description of a CBOR semantic error.
+type SemanticError struct {
+ msg string
+}
+
+func (e *SemanticError) Error() string { return e.msg }
+
+// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags.
+type MaxNestedLevelError struct {
+ maxNestedLevels int
+}
+
+func (e *MaxNestedLevelError) Error() string {
+ return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels)
+}
+
+// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays.
+type MaxArrayElementsError struct {
+ maxArrayElements int
+}
+
+func (e *MaxArrayElementsError) Error() string {
+ return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array"
+}
+
+// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps.
+type MaxMapPairsError struct {
+ maxMapPairs int
+}
+
+func (e *MaxMapPairsError) Error() string {
+ return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map"
+}
+
+// IndefiniteLengthError indicates found disallowed indefinite length items.
+type IndefiniteLengthError struct {
+ t cborType
+}
+
+func (e *IndefiniteLengthError) Error() string {
+ return "cbor: indefinite-length " + e.t.String() + " isn't allowed"
+}
+
+// TagsMdError indicates found disallowed CBOR tags.
+type TagsMdError struct {
+}
+
+func (e *TagsMdError) Error() string {
+ return "cbor: CBOR tag isn't allowed"
+}
+
+// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item.
+type ExtraneousDataError struct {
+ numOfBytes int // number of bytes of extraneous data
+ index int // location of extraneous data
+}
+
+func (e *ExtraneousDataError) Error() string {
+ return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index)
+}
+
+// wellformed checks whether the CBOR data item is well-formed.
+// allowExtraData indicates if extraneous data is allowed after the CBOR data item.
+// - use allowExtraData = true when using Decoder.Decode()
+// - use allowExtraData = false when using Unmarshal()
+func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error {
+ if len(d.data) == d.off {
+ return io.EOF
+ }
+ _, err := d.wellformedInternal(0, checkBuiltinTags)
+ if err == nil {
+ if !allowExtraData && d.off != len(d.data) {
+ err = &ExtraneousDataError{len(d.data) - d.off, d.off}
+ }
+ }
+ return err
+}
+
+// wellformedInternal checks data's well-formedness and returns max depth and error.
+func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo
+ t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag()
+ if err != nil {
+ return 0, err
+ }
+
+ switch t {
+ case cborTypeByteString, cborTypeTextString:
+ if indefiniteLength {
+ if d.dm.indefLength == IndefLengthForbidden {
+ return 0, &IndefiniteLengthError{t}
+ }
+ return d.wellformedIndefiniteString(t, depth, checkBuiltinTags)
+ }
+ valInt := int(val)
+ if valInt < 0 {
+ // Detect integer overflow
+ return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow")
+ }
+ if len(d.data)-d.off < valInt { // valInt+off may overflow integer
+ return 0, io.ErrUnexpectedEOF
+ }
+ d.off += valInt
+
+ case cborTypeArray, cborTypeMap:
+ depth++
+ if depth > d.dm.maxNestedLevels {
+ return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
+ }
+
+ if indefiniteLength {
+ if d.dm.indefLength == IndefLengthForbidden {
+ return 0, &IndefiniteLengthError{t}
+ }
+ return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags)
+ }
+
+ valInt := int(val)
+ if valInt < 0 {
+ // Detect integer overflow
+ return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow")
+ }
+
+ if t == cborTypeArray {
+ if valInt > d.dm.maxArrayElements {
+ return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
+ }
+ } else {
+ if valInt > d.dm.maxMapPairs {
+ return 0, &MaxMapPairsError{d.dm.maxMapPairs}
+ }
+ }
+
+ count := 1
+ if t == cborTypeMap {
+ count = 2
+ }
+ maxDepth := depth
+ for j := 0; j < count; j++ {
+ for i := 0; i < valInt; i++ {
+ var dpt int
+ if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
+ return 0, err
+ }
+ if dpt > maxDepth {
+ maxDepth = dpt // Save max depth
+ }
+ }
+ }
+ depth = maxDepth
+
+ case cborTypeTag:
+ if d.dm.tagsMd == TagsForbidden {
+ return 0, &TagsMdError{}
+ }
+
+ tagNum := val
+
+ // Scan nested tag numbers to avoid recursion.
+ for {
+ if len(d.data) == d.off { // Tag number must be followed by tag content.
+ return 0, io.ErrUnexpectedEOF
+ }
+ if checkBuiltinTags {
+ err = validBuiltinTag(tagNum, d.data[d.off])
+ if err != nil {
+ return 0, err
+ }
+ }
+ if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) {
+ return 0, &UnacceptableDataItemError{
+ CBORType: cborTypeTag.String(),
+ Message: "bignum",
+ }
+ }
+ if getType(d.data[d.off]) != cborTypeTag {
+ break
+ }
+ if _, _, tagNum, err = d.wellformedHead(); err != nil {
+ return 0, err
+ }
+ depth++
+ if depth > d.dm.maxNestedLevels {
+ return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
+ }
+ }
+ // Check tag content.
+ return d.wellformedInternal(depth, checkBuiltinTags)
+ }
+
+ return depth, nil
+}
+
+// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error.
+func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) {
+ var err error
+ for {
+ if len(d.data) == d.off {
+ return 0, io.ErrUnexpectedEOF
+ }
+ if isBreakFlag(d.data[d.off]) {
+ d.off++
+ break
+ }
+ // Peek ahead to get next type and indefinite length status.
+ nt, ai := parseInitialByte(d.data[d.off])
+ if t != nt {
+ return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()}
+ }
+ if additionalInformation(ai).isIndefiniteLength() {
+ return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"}
+ }
+ if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
+ return 0, err
+ }
+ }
+ return depth, nil
+}
+
+// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error.
+func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) {
+ var err error
+ maxDepth := depth
+ i := 0
+ for {
+ if len(d.data) == d.off {
+ return 0, io.ErrUnexpectedEOF
+ }
+ if isBreakFlag(d.data[d.off]) {
+ d.off++
+ break
+ }
+ var dpt int
+ if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
+ return 0, err
+ }
+ if dpt > maxDepth {
+ maxDepth = dpt
+ }
+ i++
+ if t == cborTypeArray {
+ if i > d.dm.maxArrayElements {
+ return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
+ }
+ } else {
+ if i%2 == 0 && i/2 > d.dm.maxMapPairs {
+ return 0, &MaxMapPairsError{d.dm.maxMapPairs}
+ }
+ }
+ }
+ if t == cborTypeMap && i%2 == 1 {
+ return 0, &SyntaxError{"cbor: unexpected \"break\" code"}
+ }
+ return maxDepth, nil
+}
+
+func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() (
+ t cborType,
+ ai byte,
+ val uint64,
+ indefiniteLength bool,
+ err error,
+) {
+ t, ai, val, err = d.wellformedHead()
+ if err != nil {
+ return
+ }
+ indefiniteLength = additionalInformation(ai).isIndefiniteLength()
+ return
+}
+
+func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) {
+ dataLen := len(d.data) - d.off
+ if dataLen == 0 {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+
+ t, ai = parseInitialByte(d.data[d.off])
+ val = uint64(ai)
+ d.off++
+ dataLen--
+
+ if ai <= maxAdditionalInformationWithoutArgument {
+ return t, ai, val, nil
+ }
+
+ if ai == additionalInformationWith1ByteArgument {
+ const argumentSize = 1
+ if dataLen < argumentSize {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+ val = uint64(d.data[d.off])
+ d.off++
+ if t == cborTypePrimitives && val < 32 {
+ return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()}
+ }
+ return t, ai, val, nil
+ }
+
+ if ai == additionalInformationWith2ByteArgument {
+ const argumentSize = 2
+ if dataLen < argumentSize {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+ val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize]))
+ d.off += argumentSize
+ if t == cborTypePrimitives {
+ if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil {
+ return 0, 0, 0, err
+ }
+ }
+ return t, ai, val, nil
+ }
+
+ if ai == additionalInformationWith4ByteArgument {
+ const argumentSize = 4
+ if dataLen < argumentSize {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+ val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize]))
+ d.off += argumentSize
+ if t == cborTypePrimitives {
+ if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil {
+ return 0, 0, 0, err
+ }
+ }
+ return t, ai, val, nil
+ }
+
+ if ai == additionalInformationWith8ByteArgument {
+ const argumentSize = 8
+ if dataLen < argumentSize {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+ val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize])
+ d.off += argumentSize
+ if t == cborTypePrimitives {
+ if err := d.acceptableFloat(math.Float64frombits(val)); err != nil {
+ return 0, 0, 0, err
+ }
+ }
+ return t, ai, val, nil
+ }
+
+ if additionalInformation(ai).isIndefiniteLength() {
+ switch t {
+ case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag:
+ return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
+ case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite().
+ return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"}
+ }
+ return t, ai, val, nil
+ }
+
+ // ai == 28, 29, 30
+ return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
+}
+
+func (d *decoder) acceptableFloat(f float64) error {
+ switch {
+ case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f):
+ return &UnacceptableDataItemError{
+ CBORType: cborTypePrimitives.String(),
+ Message: "floating-point NaN",
+ }
+ case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0):
+ return &UnacceptableDataItemError{
+ CBORType: cborTypePrimitives.String(),
+ Message: "floating-point infinity",
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-logr/logr/slogr/slogr.go b/vendor/github.com/go-logr/logr/slogr/slogr.go
deleted file mode 100644
index 36432c56fd..0000000000
--- a/vendor/github.com/go-logr/logr/slogr/slogr.go
+++ /dev/null
@@ -1,61 +0,0 @@
-//go:build go1.21
-// +build go1.21
-
-/*
-Copyright 2023 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package slogr enables usage of a slog.Handler with logr.Logger as front-end
-// API and of a logr.LogSink through the slog.Handler and thus slog.Logger
-// APIs.
-//
-// See the README in the top-level [./logr] package for a discussion of
-// interoperability.
-//
-// Deprecated: use the main logr package instead.
-package slogr
-
-import (
- "log/slog"
-
- "github.com/go-logr/logr"
-)
-
-// NewLogr returns a logr.Logger which writes to the slog.Handler.
-//
-// Deprecated: use [logr.FromSlogHandler] instead.
-func NewLogr(handler slog.Handler) logr.Logger {
- return logr.FromSlogHandler(handler)
-}
-
-// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
-//
-// Deprecated: use [logr.ToSlogHandler] instead.
-func NewSlogHandler(logger logr.Logger) slog.Handler {
- return logr.ToSlogHandler(logger)
-}
-
-// ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
-//
-// Deprecated: use [logr.ToSlogHandler] instead.
-func ToSlogHandler(logger logr.Logger) slog.Handler {
- return logr.ToSlogHandler(logger)
-}
-
-// SlogSink is an optional interface that a LogSink can implement to support
-// logging through the slog.Logger or slog.Handler APIs better.
-//
-// Deprecated: use [logr.SlogSink] instead.
-type SlogSink = logr.SlogSink
diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
new file mode 100644
index 0000000000..22f8d21cca
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
@@ -0,0 +1,61 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ golint:
+ min-confidence: 0
+ gocyclo:
+ min-complexity: 45
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md
index 813788aff1..0108f1d572 100644
--- a/vendor/github.com/go-openapi/jsonpointer/README.md
+++ b/vendor/github.com/go-openapi/jsonpointer/README.md
@@ -1,6 +1,10 @@
-# gojsonpointer [](https://travis-ci.org/go-openapi/jsonpointer) [](https://codecov.io/gh/go-openapi/jsonpointer) [](https://slackin.goswagger.io)
+# gojsonpointer [](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/jsonpointer)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/jsonpointer)
+[](https://goreportcard.com/report/github.com/go-openapi/jsonpointer)
-[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonpointer)
An implementation of JSON Pointer - Go language
## Status
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go
index 7df9853def..d970c7cf44 100644
--- a/vendor/github.com/go-openapi/jsonpointer/pointer.go
+++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go
@@ -26,6 +26,7 @@
package jsonpointer
import (
+ "encoding/json"
"errors"
"fmt"
"reflect"
@@ -40,6 +41,7 @@ const (
pointerSeparator = `/`
invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
+ notFound = `Can't find the pointer in the document`
)
var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
@@ -48,13 +50,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem()
// JSONPointable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONPointable interface {
- JSONLookup(string) (interface{}, error)
+ JSONLookup(string) (any, error)
}
// JSONSetable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONSetable interface {
- JSONSet(string, interface{}) error
+ JSONSet(string, any) error
}
// New creates a new json pointer for the given string
@@ -81,9 +83,7 @@ func (p *Pointer) parse(jsonPointerString string) error {
err = errors.New(invalidStart)
} else {
referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
- for _, referenceToken := range referenceTokens[1:] {
- p.referenceTokens = append(p.referenceTokens, referenceToken)
- }
+ p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)
}
}
@@ -91,38 +91,58 @@ func (p *Pointer) parse(jsonPointerString string) error {
}
// Get uses the pointer to retrieve a value from a JSON document
-func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
+func (p *Pointer) Get(document any) (any, reflect.Kind, error) {
return p.get(document, swag.DefaultJSONNameProvider)
}
// Set uses the pointer to set a value from a JSON document
-func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) {
+func (p *Pointer) Set(document any, value any) (any, error) {
return document, p.set(document, value, swag.DefaultJSONNameProvider)
}
// GetForToken gets a value for a json pointer token 1 level deep
-func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) {
+func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) {
return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
}
// SetForToken gets a value for a json pointer token 1 level deep
-func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) {
+func SetForToken(document any, decodedToken string, value any) (any, error) {
return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
}
-func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+func isNil(input any) bool {
+ if input == nil {
+ return true
+ }
+
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
+func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
rValue := reflect.Indirect(reflect.ValueOf(node))
kind := rValue.Kind()
+ if isNil(node) {
+ return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken)
+ }
- if rValue.Type().Implements(jsonPointableType) {
- r, err := node.(JSONPointable).JSONLookup(decodedToken)
+ switch typed := node.(type) {
+ case JSONPointable:
+ r, err := typed.JSONLookup(decodedToken)
if err != nil {
return nil, kind, err
}
return r, kind, nil
+ case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect
+ return getSingleImpl(*typed, decodedToken, nameProvider)
}
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -159,7 +179,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam
}
-func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error {
+func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error {
rValue := reflect.Indirect(reflect.ValueOf(node))
if ns, ok := node.(JSONSetable); ok { // pointer impl
@@ -170,7 +190,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw
return node.(JSONSetable).JSONSet(decodedToken, data)
}
- switch rValue.Kind() {
+ switch rValue.Kind() { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -210,7 +230,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw
}
-func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
if nameProvider == nil {
nameProvider = swag.DefaultJSONNameProvider
@@ -231,8 +251,7 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf
if err != nil {
return nil, knd, err
}
- node, kind = r, knd
-
+ node = r
}
rValue := reflect.ValueOf(node)
@@ -241,11 +260,11 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf
return node, kind, nil
}
-func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error {
+func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
knd := reflect.ValueOf(node).Kind()
if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
- return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values")
+ return errors.New("only structs, pointers, maps and slices are supported for setting values")
}
if nameProvider == nil {
@@ -284,7 +303,7 @@ func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) e
continue
}
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -363,6 +382,128 @@ func (p *Pointer) String() string {
return pointerString
}
+func (p *Pointer) Offset(document string) (int64, error) {
+ dec := json.NewDecoder(strings.NewReader(document))
+ var offset int64
+ for _, ttk := range p.DecodedTokens() {
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ offset, err = offsetSingleObject(dec, ttk)
+ if err != nil {
+ return 0, err
+ }
+ case '[':
+ offset, err = offsetSingleArray(dec, ttk)
+ if err != nil {
+ return 0, err
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ }
+ return offset, nil
+}
+
+func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) {
+ for dec.More() {
+ offset := dec.InputOffset()
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ }
+ case string:
+ if tk == decodedToken {
+ return offset, nil
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ }
+ return 0, fmt.Errorf("token reference %q not found", decodedToken)
+}
+
+func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
+ idx, err := strconv.Atoi(decodedToken)
+ if err != nil {
+ return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err)
+ }
+ var i int
+ for i = 0; i < idx && dec.More(); i++ {
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ }
+ }
+ }
+
+ if !dec.More() {
+ return 0, fmt.Errorf("token reference %q not found", decodedToken)
+ }
+ return dec.InputOffset(), nil
+}
+
+// drainSingle drains a single level of object or array.
+// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed.
+func drainSingle(dec *json.Decoder) error {
+ for dec.More() {
+ tk, err := dec.Token()
+ if err != nil {
+ return err
+ }
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ // Consumes the ending delim
+ if _, err := dec.Token(); err != nil {
+ return err
+ }
+ return nil
+}
+
// Specific JSON pointer encoding here
// ~0 => ~
// ~1 => /
@@ -377,14 +518,14 @@ const (
// Unescape unescapes a json pointer reference token string to the original representation
func Unescape(token string) string {
- step1 := strings.Replace(token, encRefTok1, decRefTok1, -1)
- step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1)
+ step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1)
+ step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0)
return step2
}
// Escape escapes a pointer reference token string
func Escape(token string) string {
- step1 := strings.Replace(token, decRefTok0, encRefTok0, -1)
- step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1)
+ step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0)
+ step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1)
return step2
}
diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore
index d69b53accc..c4b1b64f04 100644
--- a/vendor/github.com/go-openapi/swag/.gitignore
+++ b/vendor/github.com/go-openapi/swag/.gitignore
@@ -2,3 +2,4 @@ secrets.yml
vendor
Godeps
.idea
+*.out
diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml
index bf503e4000..80e2be0042 100644
--- a/vendor/github.com/go-openapi/swag/.golangci.yml
+++ b/vendor/github.com/go-openapi/swag/.golangci.yml
@@ -4,14 +4,14 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
- min-complexity: 25
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 3
- min-occurrences: 2
+ min-occurrences: 3
linters:
enable-all: true
@@ -20,35 +20,41 @@ linters:
- lll
- gochecknoinits
- gochecknoglobals
- - nlreturn
- - testpackage
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
- wrapcheck
+ - testpackage
+ - nlreturn
- gomnd
- - exhaustive
- exhaustivestruct
- goerr113
- - wsl
- - whitespace
- - gofumpt
- - godot
+ - errorlint
- nestif
- - godox
- - funlen
- - gci
- - gocognit
+ - godot
+ - gofumpt
- paralleltest
+ - tparallel
- thelper
- ifshort
- - gomoddirectives
- - cyclop
- - forcetypeassert
- - ireturn
- - tagliatelle
- - varnamelen
- - goimports
- - tenv
- - golint
- exhaustruct
- - nilnil
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
- nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
- nosnakecase
diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md
new file mode 100644
index 0000000000..e7f28ed6b7
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md
@@ -0,0 +1,52 @@
+# Benchmarks
+
+## Name mangling utilities
+
+```bash
+go test -bench XXX -run XXX -benchtime 30s
+```
+
+### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op
+BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op
+BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op
+```
+
+### Benchmarks after PR #79
+
+~ x10 performance improvement and ~ /100 memory allocations.
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op
+```
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op
+```
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md
index 217f6fa505..a729222998 100644
--- a/vendor/github.com/go-openapi/swag/README.md
+++ b/vendor/github.com/go-openapi/swag/README.md
@@ -1,7 +1,8 @@
-# Swag [](https://travis-ci.org/go-openapi/swag) [](https://codecov.io/gh/go-openapi/swag) [](https://slackin.goswagger.io)
+# Swag [](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/swag)
+[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE)
-[](http://godoc.org/github.com/go-openapi/swag)
+[](https://pkg.go.dev/github.com/go-openapi/swag)
[](https://goreportcard.com/report/github.com/go-openapi/swag)
Contains a bunch of helper functions for go-openapi and go-swagger projects.
@@ -18,4 +19,5 @@ You may also use it standalone for your projects.
This repo has only few dependencies outside of the standard library:
-* YAML utilities depend on gopkg.in/yaml.v2
+* YAML utilities depend on `gopkg.in/yaml.v3`
+* `github.com/mailru/easyjson v0.7.7`
diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go
new file mode 100644
index 0000000000..20a359bb60
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/initialism_index.go
@@ -0,0 +1,202 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "sort"
+ "strings"
+ "sync"
+)
+
+var (
+ // commonInitialisms are common acronyms that are kept as whole uppercased words.
+ commonInitialisms *indexOfInitialisms
+
+ // initialisms is a slice of sorted initialisms
+ initialisms []string
+
+ // a copy of initialisms pre-baked as []rune
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune
+
+ isInitialism func(string) bool
+
+ maxAllocMatches int
+)
+
+func init() {
+ // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
+ configuredInitialisms := map[string]bool{
+ "ACL": true,
+ "API": true,
+ "ASCII": true,
+ "CPU": true,
+ "CSS": true,
+ "DNS": true,
+ "EOF": true,
+ "GUID": true,
+ "HTML": true,
+ "HTTPS": true,
+ "HTTP": true,
+ "ID": true,
+ "IP": true,
+ "IPv4": true,
+ "IPv6": true,
+ "JSON": true,
+ "LHS": true,
+ "OAI": true,
+ "QPS": true,
+ "RAM": true,
+ "RHS": true,
+ "RPC": true,
+ "SLA": true,
+ "SMTP": true,
+ "SQL": true,
+ "SSH": true,
+ "TCP": true,
+ "TLS": true,
+ "TTL": true,
+ "UDP": true,
+ "UI": true,
+ "UID": true,
+ "UUID": true,
+ "URI": true,
+ "URL": true,
+ "UTF8": true,
+ "VM": true,
+ "XML": true,
+ "XMPP": true,
+ "XSRF": true,
+ "XSS": true,
+ }
+
+ // a thread-safe index of initialisms
+ commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+ maxAllocMatches = maxAllocHeuristic(initialismsRunes)
+
+ // a test function
+ isInitialism = commonInitialisms.isInitialism
+}
+
+func asRunes(in []string) [][]rune {
+ out := make([][]rune, len(in))
+ for i, initialism := range in {
+ out[i] = []rune(initialism)
+ }
+
+ return out
+}
+
+func asUpperCased(in []string) [][]rune {
+ out := make([][]rune, len(in))
+
+ for i, initialism := range in {
+ out[i] = []rune(upper(trim(initialism)))
+ }
+
+ return out
+}
+
+func maxAllocHeuristic(in [][]rune) int {
+ heuristic := make(map[rune]int)
+ for _, initialism := range in {
+ heuristic[initialism[0]]++
+ }
+
+ var maxAlloc int
+ for _, val := range heuristic {
+ if val > maxAlloc {
+ maxAlloc = val
+ }
+ }
+
+ return maxAlloc
+}
+
+// AddInitialisms add additional initialisms
+func AddInitialisms(words ...string) {
+ for _, word := range words {
+ // commonInitialisms[upper(word)] = true
+ commonInitialisms.add(upper(word))
+ }
+ // sort again
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+}
+
+// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
+// Since go1.9, this may be implemented with sync.Map.
+type indexOfInitialisms struct {
+ sortMutex *sync.Mutex
+ index *sync.Map
+}
+
+func newIndexOfInitialisms() *indexOfInitialisms {
+ return &indexOfInitialisms{
+ sortMutex: new(sync.Mutex),
+ index: new(sync.Map),
+ }
+}
+
+func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ for k, v := range initial {
+ m.index.Store(k, v)
+ }
+ return m
+}
+
+func (m *indexOfInitialisms) isInitialism(key string) bool {
+ _, ok := m.index.Load(key)
+ return ok
+}
+
+func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
+ m.index.Store(key, true)
+ return m
+}
+
+func (m *indexOfInitialisms) sorted() (result []string) {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ m.index.Range(func(key, _ interface{}) bool {
+ k := key.(string)
+ result = append(result, k)
+ return true
+ })
+ sort.Sort(sort.Reverse(byInitialism(result)))
+ return
+}
+
+type byInitialism []string
+
+func (s byInitialism) Len() int {
+ return len(s)
+}
+func (s byInitialism) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s byInitialism) Less(i, j int) bool {
+ if len(s[i]) != len(s[j]) {
+ return len(s[i]) < len(s[j])
+ }
+
+ return strings.Compare(s[i], s[j]) > 0
+}
diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go
index 00038c3773..783442fddf 100644
--- a/vendor/github.com/go-openapi/swag/loading.go
+++ b/vendor/github.com/go-openapi/swag/loading.go
@@ -21,6 +21,7 @@ import (
"net/http"
"net/url"
"os"
+ "path"
"path/filepath"
"runtime"
"strings"
@@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = ""
var LoadHTTPCustomHeaders = map[string]string{}
// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
-func LoadFromFileOrHTTP(path string) ([]byte, error) {
- return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path)
+func LoadFromFileOrHTTP(pth string) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth)
}
// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in
// timeout arg allows for per request overriding of the request timeout
-func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) {
- return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path)
+func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth)
}
-// LoadStrategy returns a loader function for a given path or uri
-func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
- if strings.HasPrefix(path, "http") {
+// LoadStrategy returns a loader function for a given path or URI.
+//
+// The load strategy returns the remote load for any path starting with `http`.
+// So this works for any URI with a scheme `http` or `https`.
+//
+// The fallback strategy is to call the local loader.
+//
+// The local loader takes a local file system path (absolute or relative) as argument,
+// or alternatively a `file://...` URI, **without host** (see also below for windows).
+//
+// There are a few liberalities, initially intended to be tolerant regarding the URI syntax,
+// especially on windows.
+//
+// Before the local loader is called, the given path is transformed:
+// - percent-encoded characters are unescaped
+// - simple paths (e.g. `./folder/file`) are passed as-is
+// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such a `folder/file` works too.
+//
+// For paths provided as URIs with the "file" scheme, please note that:
+// - `file://` is simply stripped.
+// This means that the host part of the URI is not parsed at all.
+// For example, `file:///folder/file" becomes "/folder/file`,
+// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems.
+// Similarly, `file://./folder/file` yields `./folder/file`.
+// - on windows, `file://...` can take a host so as to specify an UNC share location.
+//
+// Reminder about windows-specifics:
+// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported)
+// - `file:///c:/folder/file` becomes `C:\folder\file`
+// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file`
+func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
+ if strings.HasPrefix(pth, "http") {
return remote
}
- return func(pth string) ([]byte, error) {
- upth, err := pathUnescape(pth)
+
+ return func(p string) ([]byte, error) {
+ upth, err := url.PathUnescape(p)
if err != nil {
return nil, err
}
- if strings.HasPrefix(pth, `file://`) {
- if runtime.GOOS == "windows" {
- // support for canonical file URIs on windows.
- // Zero tolerance here for dodgy URIs.
- u, _ := url.Parse(upth)
- if u.Host != "" {
- // assume UNC name (volume share)
- // file://host/share/folder\... ==> \\host\share\path\folder
- // NOTE: UNC port not yet supported
- upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`)
- } else {
- // file:///c:/folder/... ==> just remove the leading slash
- upth = strings.TrimPrefix(upth, `file:///`)
- }
- } else {
- upth = strings.TrimPrefix(upth, `file://`)
+ if !strings.HasPrefix(p, `file://`) {
+ // regular file path provided: just normalize slashes
+ return local(filepath.FromSlash(upth))
+ }
+
+ if runtime.GOOS != "windows" {
+ // crude processing: this leaves full URIs with a host with a (mostly) unexpected result
+ upth = strings.TrimPrefix(upth, `file://`)
+
+ return local(filepath.FromSlash(upth))
+ }
+
+ // windows-only pre-processing of file://... URIs
+
+ // support for canonical file URIs on windows.
+ u, err := url.Parse(filepath.ToSlash(upth))
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Host != "" {
+ // assume UNC name (volume share)
+ // NOTE: UNC port not yet supported
+
+ // when the "host" segment is a drive letter:
+ // file://C:/folder/... => C:\folder
+ upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`))
+ if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' {
+ // tolerance: if we have a leading dot, this can't be a host
+ // file://host/share/folder\... ==> \\host\share\path\folder
+ upth = "//" + upth
+ }
+ } else {
+ // no host, let's figure out if this is a drive letter
+ upth = strings.TrimPrefix(upth, `file://`)
+ first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/")
+ if strings.HasSuffix(first, ":") {
+ // drive letter in the first segment:
+ // file:///c:/folder/... ==> strip the leading slash
+ upth = strings.TrimPrefix(upth, `/`)
}
}
diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go
index aa7f6a9bb8..8bb64ac32f 100644
--- a/vendor/github.com/go-openapi/swag/name_lexem.go
+++ b/vendor/github.com/go-openapi/swag/name_lexem.go
@@ -14,74 +14,80 @@
package swag
-import "unicode"
+import (
+ "unicode"
+ "unicode/utf8"
+)
type (
- nameLexem interface {
- GetUnsafeGoName() string
- GetOriginal() string
- IsInitialism() bool
- }
+ lexemKind uint8
- initialismNameLexem struct {
+ nameLexem struct {
original string
matchedInitialism string
+ kind lexemKind
}
+)
- casualNameLexem struct {
- original string
- }
+const (
+ lexemKindCasualName lexemKind = iota
+ lexemKindInitialismName
)
-func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem {
- return &initialismNameLexem{
+func newInitialismNameLexem(original, matchedInitialism string) nameLexem {
+ return nameLexem{
+ kind: lexemKindInitialismName,
original: original,
matchedInitialism: matchedInitialism,
}
}
-func newCasualNameLexem(original string) *casualNameLexem {
- return &casualNameLexem{
+func newCasualNameLexem(original string) nameLexem {
+ return nameLexem{
+ kind: lexemKindCasualName,
original: original,
}
}
-func (l *initialismNameLexem) GetUnsafeGoName() string {
- return l.matchedInitialism
-}
+func (l nameLexem) GetUnsafeGoName() string {
+ if l.kind == lexemKindInitialismName {
+ return l.matchedInitialism
+ }
+
+ var (
+ first rune
+ rest string
+ )
-func (l *casualNameLexem) GetUnsafeGoName() string {
- var first rune
- var rest string
for i, orig := range l.original {
if i == 0 {
first = orig
continue
}
+
if i > 0 {
rest = l.original[i:]
break
}
}
+
if len(l.original) > 1 {
- return string(unicode.ToUpper(first)) + lower(rest)
+ b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(b)
+ }()
+ b.WriteRune(unicode.ToUpper(first))
+ b.WriteString(lower(rest))
+ return b.String()
}
return l.original
}
-func (l *initialismNameLexem) GetOriginal() string {
+func (l nameLexem) GetOriginal() string {
return l.original
}
-func (l *casualNameLexem) GetOriginal() string {
- return l.original
-}
-
-func (l *initialismNameLexem) IsInitialism() bool {
- return true
-}
-
-func (l *casualNameLexem) IsInitialism() bool {
- return false
+func (l nameLexem) IsInitialism() bool {
+ return l.kind == lexemKindInitialismName
}
diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go
deleted file mode 100644
index f5228b82c0..0000000000
--- a/vendor/github.com/go-openapi/swag/post_go18.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.8
-// +build go1.8
-
-package swag
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
- return url.PathUnescape(path)
-}
diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go
deleted file mode 100644
index 7c7da9c088..0000000000
--- a/vendor/github.com/go-openapi/swag/post_go19.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.9
-// +build go1.9
-
-package swag
-
-import (
- "sort"
- "sync"
-)
-
-// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
-// Since go1.9, this may be implemented with sync.Map.
-type indexOfInitialisms struct {
- sortMutex *sync.Mutex
- index *sync.Map
-}
-
-func newIndexOfInitialisms() *indexOfInitialisms {
- return &indexOfInitialisms{
- sortMutex: new(sync.Mutex),
- index: new(sync.Map),
- }
-}
-
-func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
- m.sortMutex.Lock()
- defer m.sortMutex.Unlock()
- for k, v := range initial {
- m.index.Store(k, v)
- }
- return m
-}
-
-func (m *indexOfInitialisms) isInitialism(key string) bool {
- _, ok := m.index.Load(key)
- return ok
-}
-
-func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
- m.index.Store(key, true)
- return m
-}
-
-func (m *indexOfInitialisms) sorted() (result []string) {
- m.sortMutex.Lock()
- defer m.sortMutex.Unlock()
- m.index.Range(func(key, value interface{}) bool {
- k := key.(string)
- result = append(result, k)
- return true
- })
- sort.Sort(sort.Reverse(byInitialism(result)))
- return
-}
diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go
deleted file mode 100644
index 2757d9b95f..0000000000
--- a/vendor/github.com/go-openapi/swag/pre_go18.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.8
-// +build !go1.8
-
-package swag
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
- return url.QueryUnescape(path)
-}
diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go
deleted file mode 100644
index 0565db377b..0000000000
--- a/vendor/github.com/go-openapi/swag/pre_go19.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.9
-// +build !go1.9
-
-package swag
-
-import (
- "sort"
- "sync"
-)
-
-// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
-// Before go1.9, this may be implemented with a mutex on the map.
-type indexOfInitialisms struct {
- getMutex *sync.Mutex
- index map[string]bool
-}
-
-func newIndexOfInitialisms() *indexOfInitialisms {
- return &indexOfInitialisms{
- getMutex: new(sync.Mutex),
- index: make(map[string]bool, 50),
- }
-}
-
-func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- for k, v := range initial {
- m.index[k] = v
- }
- return m
-}
-
-func (m *indexOfInitialisms) isInitialism(key string) bool {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- _, ok := m.index[key]
- return ok
-}
-
-func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- m.index[key] = true
- return m
-}
-
-func (m *indexOfInitialisms) sorted() (result []string) {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- for k := range m.index {
- result = append(result, k)
- }
- sort.Sort(sort.Reverse(byInitialism(result)))
- return
-}
diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go
index a1825fb7dc..274727a866 100644
--- a/vendor/github.com/go-openapi/swag/split.go
+++ b/vendor/github.com/go-openapi/swag/split.go
@@ -15,124 +15,269 @@
package swag
import (
+ "bytes"
+ "sync"
"unicode"
+ "unicode/utf8"
)
-var nameReplaceTable = map[rune]string{
- '@': "At ",
- '&': "And ",
- '|': "Pipe ",
- '$': "Dollar ",
- '!': "Bang ",
- '-': "",
- '_': "",
-}
-
type (
splitter struct {
- postSplitInitialismCheck bool
initialisms []string
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version
+ postSplitInitialismCheck bool
+ }
+
+ splitterOption func(*splitter)
+
+ initialismMatch struct {
+ body []rune
+ start, end int
+ complete bool
+ }
+ initialismMatches []initialismMatch
+)
+
+type (
+ // memory pools of temporary objects.
+ //
+ // These are used to recycle temporarily allocated objects
+ // and relieve the GC from undue pressure.
+
+ matchesPool struct {
+ *sync.Pool
}
- splitterOption func(*splitter) *splitter
+ buffersPool struct {
+ *sync.Pool
+ }
+
+ lexemsPool struct {
+ *sync.Pool
+ }
+
+ splittersPool struct {
+ *sync.Pool
+ }
)
-// split calls the splitter; splitter provides more control and post options
+var (
+ // poolOfMatches holds temporary slices for recycling during the initialism match process
+ poolOfMatches = matchesPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make(initialismMatches, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ poolOfBuffers = buffersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ return new(bytes.Buffer)
+ },
+ },
+ }
+
+ poolOfLexems = lexemsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make([]nameLexem, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ poolOfSplitters = splittersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := newSplitter()
+
+ return &s
+ },
+ },
+ }
+)
+
+// nameReplaceTable finds a word representation for special characters.
+func nameReplaceTable(r rune) (string, bool) {
+ switch r {
+ case '@':
+ return "At ", true
+ case '&':
+ return "And ", true
+ case '|':
+ return "Pipe ", true
+ case '$':
+ return "Dollar ", true
+ case '!':
+ return "Bang ", true
+ case '-':
+ return "", true
+ case '_':
+ return "", true
+ default:
+ return "", false
+ }
+}
+
+// split calls the splitter.
+//
+// Use newSplitter for more control and options
func split(str string) []string {
- lexems := newSplitter().split(str)
- result := make([]string, 0, len(lexems))
+ s := poolOfSplitters.BorrowSplitter()
+ lexems := s.split(str)
+ result := make([]string, 0, len(*lexems))
- for _, lexem := range lexems {
+ for _, lexem := range *lexems {
result = append(result, lexem.GetOriginal())
}
+ poolOfLexems.RedeemLexems(lexems)
+ poolOfSplitters.RedeemSplitter(s)
return result
}
-func (s *splitter) split(str string) []nameLexem {
- return s.toNameLexems(str)
-}
-
-func newSplitter(options ...splitterOption) *splitter {
- splitter := &splitter{
+func newSplitter(options ...splitterOption) splitter {
+ s := splitter{
postSplitInitialismCheck: false,
initialisms: initialisms,
+ initialismsRunes: initialismsRunes,
+ initialismsUpperCased: initialismsUpperCased,
}
for _, option := range options {
- splitter = option(splitter)
+ option(&s)
}
- return splitter
+ return s
}
// withPostSplitInitialismCheck allows to catch initialisms after main split process
-func withPostSplitInitialismCheck(s *splitter) *splitter {
+func withPostSplitInitialismCheck(s *splitter) {
s.postSplitInitialismCheck = true
+}
+
+func (p matchesPool) BorrowMatches() *initialismMatches {
+ s := p.Get().(*initialismMatches)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
return s
}
-type (
- initialismMatch struct {
- start, end int
- body []rune
- complete bool
+func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer {
+ s := p.Get().(*bytes.Buffer)
+ s.Reset()
+
+ if s.Cap() < size {
+ s.Grow(size)
}
- initialismMatches []*initialismMatch
-)
-func (s *splitter) toNameLexems(name string) []nameLexem {
+ return s
+}
+
+func (p lexemsPool) BorrowLexems() *[]nameLexem {
+ s := p.Get().(*[]nameLexem)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
+ return s
+}
+
+func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter {
+ s := p.Get().(*splitter)
+ s.postSplitInitialismCheck = false // reset options
+ for _, apply := range options {
+ apply(s)
+ }
+
+ return s
+}
+
+func (p matchesPool) RedeemMatches(s *initialismMatches) {
+ p.Put(s)
+}
+
+func (p buffersPool) RedeemBuffer(s *bytes.Buffer) {
+ p.Put(s)
+}
+
+func (p lexemsPool) RedeemLexems(s *[]nameLexem) {
+ p.Put(s)
+}
+
+func (p splittersPool) RedeemSplitter(s *splitter) {
+ p.Put(s)
+}
+
+func (m initialismMatch) isZero() bool {
+ return m.start == 0 && m.end == 0
+}
+
+func (s splitter) split(name string) *[]nameLexem {
nameRunes := []rune(name)
matches := s.gatherInitialismMatches(nameRunes)
+ if matches == nil {
+ return poolOfLexems.BorrowLexems()
+ }
+
return s.mapMatchesToNameLexems(nameRunes, matches)
}
-func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
- matches := make(initialismMatches, 0)
+func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches {
+ var matches *initialismMatches
for currentRunePosition, currentRune := range nameRunes {
- newMatches := make(initialismMatches, 0, len(matches))
+ // recycle these allocations as we loop over runes
+ // with such recycling, only 2 slices should be allocated per call
+ // instead of o(n).
+ newMatches := poolOfMatches.BorrowMatches()
// check current initialism matches
- for _, match := range matches {
- if keepCompleteMatch := match.complete; keepCompleteMatch {
- newMatches = append(newMatches, match)
- continue
- }
+ if matches != nil { // skip first iteration
+ for _, match := range *matches {
+ if keepCompleteMatch := match.complete; keepCompleteMatch {
+ *newMatches = append(*newMatches, match)
+ continue
+ }
- // drop failed match
- currentMatchRune := match.body[currentRunePosition-match.start]
- if !s.initialismRuneEqual(currentMatchRune, currentRune) {
- continue
- }
+ // drop failed match
+ currentMatchRune := match.body[currentRunePosition-match.start]
+ if currentMatchRune != currentRune {
+ continue
+ }
- // try to complete ongoing match
- if currentRunePosition-match.start == len(match.body)-1 {
- // we are close; the next step is to check the symbol ahead
- // if it is a small letter, then it is not the end of match
- // but beginning of the next word
-
- if currentRunePosition < len(nameRunes)-1 {
- nextRune := nameRunes[currentRunePosition+1]
- if newWord := unicode.IsLower(nextRune); newWord {
- // oh ok, it was the start of a new word
- continue
+ // try to complete ongoing match
+ if currentRunePosition-match.start == len(match.body)-1 {
+ // we are close; the next step is to check the symbol ahead
+ // if it is a small letter, then it is not the end of match
+ // but beginning of the next word
+
+ if currentRunePosition < len(nameRunes)-1 {
+ nextRune := nameRunes[currentRunePosition+1]
+ if newWord := unicode.IsLower(nextRune); newWord {
+ // oh ok, it was the start of a new word
+ continue
+ }
}
+
+ match.complete = true
+ match.end = currentRunePosition
}
- match.complete = true
- match.end = currentRunePosition
+ *newMatches = append(*newMatches, match)
}
-
- newMatches = append(newMatches, match)
}
// check for new initialism matches
- for _, initialism := range s.initialisms {
- initialismRunes := []rune(initialism)
- if s.initialismRuneEqual(initialismRunes[0], currentRune) {
- newMatches = append(newMatches, &initialismMatch{
+ for i := range s.initialisms {
+ initialismRunes := s.initialismsRunes[i]
+ if initialismRunes[0] == currentRune {
+ *newMatches = append(*newMatches, initialismMatch{
start: currentRunePosition,
body: initialismRunes,
complete: false,
@@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
}
}
+ if matches != nil {
+ poolOfMatches.RedeemMatches(matches)
+ }
matches = newMatches
}
+ // up to the caller to redeem this last slice
return matches
}
-func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem {
- nameLexems := make([]nameLexem, 0)
+func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem {
+ nameLexems := poolOfLexems.BorrowLexems()
- var lastAcceptedMatch *initialismMatch
- for _, match := range matches {
+ var lastAcceptedMatch initialismMatch
+ for _, match := range *matches {
if !match.complete {
continue
}
- if firstMatch := lastAcceptedMatch == nil; firstMatch {
- nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...)
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+ if firstMatch := lastAcceptedMatch.isZero(); firstMatch {
+ s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start])
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
@@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa
}
middle := nameRunes[lastAcceptedMatch.end+1 : match.start]
- nameLexems = append(nameLexems, s.breakCasualString(middle)...)
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+ s.appendBrokenDownCasualString(nameLexems, middle)
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
}
// we have not found any accepted matches
- if lastAcceptedMatch == nil {
- return s.breakCasualString(nameRunes)
- }
-
- if lastAcceptedMatch.end+1 != len(nameRunes) {
+ if lastAcceptedMatch.isZero() {
+ *nameLexems = (*nameLexems)[:0]
+ s.appendBrokenDownCasualString(nameLexems, nameRunes)
+ } else if lastAcceptedMatch.end+1 != len(nameRunes) {
rest := nameRunes[lastAcceptedMatch.end+1:]
- nameLexems = append(nameLexems, s.breakCasualString(rest)...)
+ s.appendBrokenDownCasualString(nameLexems, rest)
}
- return nameLexems
-}
+ poolOfMatches.RedeemMatches(matches)
-func (s *splitter) initialismRuneEqual(a, b rune) bool {
- return a == b
+ return nameLexems
}
-func (s *splitter) breakInitialism(original string) nameLexem {
+func (s splitter) breakInitialism(original string) nameLexem {
return newInitialismNameLexem(original, original)
}
-func (s *splitter) breakCasualString(str []rune) []nameLexem {
- segments := make([]nameLexem, 0)
- currentSegment := ""
+func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) {
+ currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused
+ defer func() {
+ poolOfBuffers.RedeemBuffer(currentSegment)
+ }()
addCasualNameLexem := func(original string) {
- segments = append(segments, newCasualNameLexem(original))
+ *segments = append(*segments, newCasualNameLexem(original))
}
addInitialismNameLexem := func(original, match string) {
- segments = append(segments, newInitialismNameLexem(original, match))
+ *segments = append(*segments, newInitialismNameLexem(original, match))
}
- addNameLexem := func(original string) {
- if s.postSplitInitialismCheck {
- for _, initialism := range s.initialisms {
- if upper(initialism) == upper(original) {
- addInitialismNameLexem(original, initialism)
+ var addNameLexem func(string)
+ if s.postSplitInitialismCheck {
+ addNameLexem = func(original string) {
+ for i := range s.initialisms {
+ if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) {
+ addInitialismNameLexem(original, s.initialisms[i])
+
return
}
}
- }
- addCasualNameLexem(original)
+ addCasualNameLexem(original)
+ }
+ } else {
+ addNameLexem = addCasualNameLexem
}
- for _, rn := range string(str) {
- if replace, found := nameReplaceTable[rn]; found {
- if currentSegment != "" {
- addNameLexem(currentSegment)
- currentSegment = ""
+ for _, rn := range str {
+ if replace, found := nameReplaceTable(rn); found {
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
}
if replace != "" {
@@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem {
}
if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
- if currentSegment != "" {
- addNameLexem(currentSegment)
- currentSegment = ""
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
}
continue
}
if unicode.IsUpper(rn) {
- if currentSegment != "" {
- addNameLexem(currentSegment)
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
}
- currentSegment = ""
+ currentSegment.Reset()
}
- currentSegment += string(rn)
+ currentSegment.WriteRune(rn)
+ }
+
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
}
+}
+
+// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but
+// it ignores leading and trailing blank spaces in the compared
+// string.
+//
+// base is assumed to be composed of upper-cased runes, and be already
+// trimmed.
+//
+// This code is heavily inspired from strings.EqualFold.
+func isEqualFoldIgnoreSpace(base []rune, str string) bool {
+ var i, baseIndex int
+ // equivalent to b := []byte(str), but without data copy
+ b := hackStringBytes(str)
+
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ break
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ break
+ }
+ i += size
+ }
+
+ if i >= len(b) {
+ return len(base) == 0
+ }
+
+ for _, baseRune := range base {
+ if i >= len(b) {
+ break
+ }
+
+ if c := b[i]; c < utf8.RuneSelf {
+ // single byte rune case (ASCII)
+ if baseRune >= utf8.RuneSelf {
+ return false
+ }
+
+ baseChar := byte(baseRune)
+ if c != baseChar &&
+ !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) {
+ return false
+ }
+
+ baseIndex++
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if unicode.ToUpper(r) != baseRune {
+ return false
+ }
+ baseIndex++
+ i += size
+ }
+
+ if baseIndex != len(base) {
+ return false
+ }
+
+ // all passed: now we should only have blanks
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ return false
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ return false
+ }
- if currentSegment != "" {
- addNameLexem(currentSegment)
+ i += size
}
- return segments
+ return true
}
diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go
new file mode 100644
index 0000000000..90745d5ca9
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/string_bytes.go
@@ -0,0 +1,8 @@
+package swag
+
+import "unsafe"
+
+// hackStringBytes returns the (unsafe) underlying bytes slice of a string.
+func hackStringBytes(str string) []byte {
+ return unsafe.Slice(unsafe.StringData(str), len(str))
+}
diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go
index f78ab684a0..5051401c49 100644
--- a/vendor/github.com/go-openapi/swag/util.go
+++ b/vendor/github.com/go-openapi/swag/util.go
@@ -18,76 +18,25 @@ import (
"reflect"
"strings"
"unicode"
+ "unicode/utf8"
)
-// commonInitialisms are common acronyms that are kept as whole uppercased words.
-var commonInitialisms *indexOfInitialisms
-
-// initialisms is a slice of sorted initialisms
-var initialisms []string
-
-var isInitialism func(string) bool
-
// GoNamePrefixFunc sets an optional rule to prefix go names
// which do not start with a letter.
//
+// The prefix function is assumed to return a string that starts with an upper case letter.
+//
// e.g. to help convert "123" into "{prefix}123"
//
// The default is to prefix with "X"
var GoNamePrefixFunc func(string) string
-func init() {
- // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
- var configuredInitialisms = map[string]bool{
- "ACL": true,
- "API": true,
- "ASCII": true,
- "CPU": true,
- "CSS": true,
- "DNS": true,
- "EOF": true,
- "GUID": true,
- "HTML": true,
- "HTTPS": true,
- "HTTP": true,
- "ID": true,
- "IP": true,
- "IPv4": true,
- "IPv6": true,
- "JSON": true,
- "LHS": true,
- "OAI": true,
- "QPS": true,
- "RAM": true,
- "RHS": true,
- "RPC": true,
- "SLA": true,
- "SMTP": true,
- "SQL": true,
- "SSH": true,
- "TCP": true,
- "TLS": true,
- "TTL": true,
- "UDP": true,
- "UI": true,
- "UID": true,
- "UUID": true,
- "URI": true,
- "URL": true,
- "UTF8": true,
- "VM": true,
- "XML": true,
- "XMPP": true,
- "XSRF": true,
- "XSS": true,
+func prefixFunc(name, in string) string {
+ if GoNamePrefixFunc == nil {
+ return "X" + in
}
- // a thread-safe index of initialisms
- commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
- initialisms = commonInitialisms.sorted()
-
- // a test function
- isInitialism = commonInitialisms.isInitialism
+ return GoNamePrefixFunc(name) + in
}
const (
@@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string {
return result
}
-type byInitialism []string
-
-func (s byInitialism) Len() int {
- return len(s)
-}
-func (s byInitialism) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-func (s byInitialism) Less(i, j int) bool {
- if len(s[i]) != len(s[j]) {
- return len(s[i]) < len(s[j])
- }
-
- return strings.Compare(s[i], s[j]) > 0
-}
-
// Removes leading whitespaces
func trim(str string) string {
- return strings.Trim(str, " ")
+ return strings.TrimSpace(str)
}
// Shortcut to strings.ToUpper()
@@ -188,15 +121,20 @@ func lower(str string) string {
}
// Camelize an uppercased word
-func Camelize(word string) (camelized string) {
+func Camelize(word string) string {
+ camelized := poolOfBuffers.BorrowBuffer(len(word))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(camelized)
+ }()
+
for pos, ru := range []rune(word) {
if pos > 0 {
- camelized += string(unicode.ToLower(ru))
+ camelized.WriteRune(unicode.ToLower(ru))
} else {
- camelized += string(unicode.ToUpper(ru))
+ camelized.WriteRune(unicode.ToUpper(ru))
}
}
- return
+ return camelized.String()
}
// ToFileName lowercases and underscores a go type name
@@ -224,33 +162,40 @@ func ToCommandName(name string) string {
// ToHumanNameLower represents a code name as a human series of words
func ToHumanNameLower(name string) string {
- in := newSplitter(withPostSplitInitialismCheck).split(name)
- out := make([]string, 0, len(in))
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ out := make([]string, 0, len(*in))
- for _, w := range in {
+ for _, w := range *in {
if !w.IsInitialism() {
out = append(out, lower(w.GetOriginal()))
} else {
- out = append(out, w.GetOriginal())
+ out = append(out, trim(w.GetOriginal()))
}
}
+ poolOfLexems.RedeemLexems(in)
return strings.Join(out, " ")
}
// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
func ToHumanNameTitle(name string) string {
- in := newSplitter(withPostSplitInitialismCheck).split(name)
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
- out := make([]string, 0, len(in))
- for _, w := range in {
- original := w.GetOriginal()
+ out := make([]string, 0, len(*in))
+ for _, w := range *in {
+ original := trim(w.GetOriginal())
if !w.IsInitialism() {
out = append(out, Camelize(original))
} else {
out = append(out, original)
}
}
+ poolOfLexems.RedeemLexems(in)
+
return strings.Join(out, " ")
}
@@ -264,7 +209,7 @@ func ToJSONName(name string) string {
out = append(out, lower(w))
continue
}
- out = append(out, Camelize(w))
+ out = append(out, Camelize(trim(w)))
}
return strings.Join(out, "")
}
@@ -283,35 +228,70 @@ func ToVarName(name string) string {
// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
func ToGoName(name string) string {
- lexems := newSplitter(withPostSplitInitialismCheck).split(name)
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ lexems := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ defer func() {
+ poolOfLexems.RedeemLexems(lexems)
+ }()
+ lexemes := *lexems
+
+ if len(lexemes) == 0 {
+ return ""
+ }
+
+ result := poolOfBuffers.BorrowBuffer(len(name))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(result)
+ }()
- result := ""
- for _, lexem := range lexems {
+ // check if not starting with a letter, upper case
+ firstPart := lexemes[0].GetUnsafeGoName()
+ if lexemes[0].IsInitialism() {
+ firstPart = upper(firstPart)
+ }
+
+ if c := firstPart[0]; c < utf8.RuneSelf {
+ // ASCII
+ switch {
+ case 'A' <= c && c <= 'Z':
+ result.WriteString(firstPart)
+ case 'a' <= c && c <= 'z':
+ result.WriteByte(c - 'a' + 'A')
+ result.WriteString(firstPart[1:])
+ default:
+ result.WriteString(prefixFunc(name, firstPart))
+ // NOTE: no longer check if prefixFunc returns a string that starts with uppercase:
+ // assume this is always the case
+ }
+ } else {
+ // unicode
+ firstRune, _ := utf8.DecodeRuneInString(firstPart)
+ switch {
+ case !unicode.IsLetter(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ case !unicode.IsUpper(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ /*
+ result.WriteRune(unicode.ToUpper(firstRune))
+ result.WriteString(firstPart[offset:])
+ */
+ default:
+ result.WriteString(firstPart)
+ }
+ }
+
+ for _, lexem := range lexemes[1:] {
goName := lexem.GetUnsafeGoName()
// to support old behavior
if lexem.IsInitialism() {
goName = upper(goName)
}
- result += goName
+ result.WriteString(goName)
}
- if len(result) > 0 {
- // Only prefix with X when the first character isn't an ascii letter
- first := []rune(result)[0]
- if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) {
- if GoNamePrefixFunc == nil {
- return "X" + result
- }
- result = GoNamePrefixFunc(name) + result
- }
- first = []rune(result)[0]
- if unicode.IsLetter(first) && !unicode.IsUpper(first) {
- result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...))
- }
- }
-
- return result
+ return result.String()
}
// ContainsStrings searches a slice of strings for a case-sensitive match
@@ -341,13 +321,22 @@ type zeroable interface {
// IsZero returns true when the value passed into the function is a zero value.
// This allows for safer checking of interface values.
func IsZero(data interface{}) bool {
+ v := reflect.ValueOf(data)
+ // check for nil data
+ switch v.Kind() { //nolint:exhaustive
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ if v.IsNil() {
+ return true
+ }
+ }
+
// check for things that have an IsZero method instead
if vv, ok := data.(zeroable); ok {
return vv.IsZero()
}
+
// continue with slightly more complex reflection
- v := reflect.ValueOf(data)
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.String:
return v.Len() == 0
case reflect.Bool:
@@ -358,24 +347,13 @@ func IsZero(data interface{}) bool {
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return v.IsNil()
case reflect.Struct, reflect.Array:
return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface())
case reflect.Invalid:
return true
+ default:
+ return false
}
- return false
-}
-
-// AddInitialisms add additional initialisms
-func AddInitialisms(words ...string) {
- for _, word := range words {
- // commonInitialisms[upper(word)] = true
- commonInitialisms.add(upper(word))
- }
- // sort again
- initialisms = commonInitialisms.sorted()
}
// CommandLineOptionsGroup represents a group of user-defined command line options
diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go
index f09ee609f3..f59e025932 100644
--- a/vendor/github.com/go-openapi/swag/yaml.go
+++ b/vendor/github.com/go-openapi/swag/yaml.go
@@ -16,8 +16,11 @@ package swag
import (
"encoding/json"
+ "errors"
"fmt"
"path/filepath"
+ "reflect"
+ "sort"
"strconv"
"github.com/mailru/easyjson/jlexer"
@@ -48,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) {
return nil, err
}
if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
- return nil, fmt.Errorf("only YAML documents that are objects are supported")
+ return nil, errors.New("only YAML documents that are objects are supported")
}
return &document, nil
}
@@ -147,7 +150,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) {
case yamlTimestamp:
return node.Value, nil
case yamlNull:
- return nil, nil
+ return nil, nil //nolint:nilnil
default:
return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag())
}
@@ -245,7 +248,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) {
return yaml.Marshal(&n)
}
+func isNil(input interface{}) bool {
+ if input == nil {
+ return true
+ }
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
func json2yaml(item interface{}) (*yaml.Node, error) {
+ if isNil(item) {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Value: "null",
+ }, nil
+ }
+
switch val := item.(type) {
case JSONMapSlice:
var n yaml.Node
@@ -265,7 +288,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
case map[string]interface{}:
var n yaml.Node
n.Kind = yaml.MappingNode
- for k, v := range val {
+ keys := make([]string, 0, len(val))
+ for k := range val {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := val[k]
childNode, err := json2yaml(v)
if err != nil {
return nil, err
@@ -318,8 +348,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
Tag: yamlBoolScalar,
Value: strconv.FormatBool(val),
}, nil
+ default:
+ return nil, fmt.Errorf("unhandled type: %T", val)
}
- return nil, nil
}
// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice
diff --git a/vendor/github.com/go-task/slim-sprig/.editorconfig b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/.editorconfig
rename to vendor/github.com/go-task/slim-sprig/v3/.editorconfig
diff --git a/vendor/github.com/go-task/slim-sprig/.gitattributes b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/.gitattributes
rename to vendor/github.com/go-task/slim-sprig/v3/.gitattributes
diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/v3/.gitignore
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/.gitignore
rename to vendor/github.com/go-task/slim-sprig/v3/.gitignore
diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md
similarity index 95%
rename from vendor/github.com/go-task/slim-sprig/CHANGELOG.md
rename to vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md
index 61d8ebffc3..2ce45dd4ec 100644
--- a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md
+++ b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md
@@ -1,5 +1,24 @@
# Changelog
+## Release 3.2.3 (2022-11-29)
+
+### Changed
+
+- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi)
+- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero)
+- #353: Updated masterminds/semver which included bug fixes
+- #354: Updated golang.org/x/crypto which included bug fixes
+
+## Release 3.2.2 (2021-02-04)
+
+This is a re-release of 3.2.1 to satisfy something with the Go module system.
+
+## Release 3.2.1 (2021-02-04)
+
+### Changed
+
+- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr)
+
## Release 3.2.0 (2020-12-14)
### Added
diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/LICENSE.txt
rename to vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt
diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/v3/README.md
similarity index 88%
rename from vendor/github.com/go-task/slim-sprig/README.md
rename to vendor/github.com/go-task/slim-sprig/v3/README.md
index 72579471ff..b5ab564254 100644
--- a/vendor/github.com/go-task/slim-sprig/README.md
+++ b/vendor/github.com/go-task/slim-sprig/v3/README.md
@@ -1,4 +1,4 @@
-# Slim-Sprig: Template functions for Go templates [](https://godoc.org/github.com/go-task/slim-sprig) [](https://goreportcard.com/report/github.com/go-task/slim-sprig)
+# Slim-Sprig: Template functions for Go templates [](https://pkg.go.dev/github.com/go-task/slim-sprig/v3)
Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with
all functions that depend on external (non standard library) or crypto packages
diff --git a/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml
similarity index 89%
rename from vendor/github.com/go-task/slim-sprig/Taskfile.yml
rename to vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml
index cdcfd223b7..8e6346bb19 100644
--- a/vendor/github.com/go-task/slim-sprig/Taskfile.yml
+++ b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml
@@ -1,6 +1,6 @@
# https://taskfile.dev
-version: '2'
+version: '3'
tasks:
default:
diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/v3/crypto.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/crypto.go
rename to vendor/github.com/go-task/slim-sprig/v3/crypto.go
diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/v3/date.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/date.go
rename to vendor/github.com/go-task/slim-sprig/v3/date.go
diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/v3/defaults.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/defaults.go
rename to vendor/github.com/go-task/slim-sprig/v3/defaults.go
diff --git a/vendor/github.com/go-task/slim-sprig/dict.go b/vendor/github.com/go-task/slim-sprig/v3/dict.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/dict.go
rename to vendor/github.com/go-task/slim-sprig/v3/dict.go
diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/v3/doc.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/doc.go
rename to vendor/github.com/go-task/slim-sprig/v3/doc.go
diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/v3/functions.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/functions.go
rename to vendor/github.com/go-task/slim-sprig/v3/functions.go
diff --git a/vendor/github.com/go-task/slim-sprig/list.go b/vendor/github.com/go-task/slim-sprig/v3/list.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/list.go
rename to vendor/github.com/go-task/slim-sprig/v3/list.go
diff --git a/vendor/github.com/go-task/slim-sprig/network.go b/vendor/github.com/go-task/slim-sprig/v3/network.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/network.go
rename to vendor/github.com/go-task/slim-sprig/v3/network.go
diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/v3/numeric.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/numeric.go
rename to vendor/github.com/go-task/slim-sprig/v3/numeric.go
diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/v3/reflect.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/reflect.go
rename to vendor/github.com/go-task/slim-sprig/v3/reflect.go
diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/v3/regex.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/regex.go
rename to vendor/github.com/go-task/slim-sprig/v3/regex.go
diff --git a/vendor/github.com/go-task/slim-sprig/strings.go b/vendor/github.com/go-task/slim-sprig/v3/strings.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/strings.go
rename to vendor/github.com/go-task/slim-sprig/v3/strings.go
diff --git a/vendor/github.com/go-task/slim-sprig/url.go b/vendor/github.com/go-task/slim-sprig/v3/url.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/url.go
rename to vendor/github.com/go-task/slim-sprig/v3/url.go
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
deleted file mode 100644
index fdff3fdb4c..0000000000
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
- "fmt"
- "strings"
-
- "github.com/golang/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-
- anypb "github.com/golang/protobuf/ptypes/any"
-)
-
-const urlPrefix = "type.googleapis.com/"
-
-// AnyMessageName returns the message name contained in an anypb.Any message.
-// Most type assertions should use the Is function instead.
-//
-// Deprecated: Call the any.MessageName method instead.
-func AnyMessageName(any *anypb.Any) (string, error) {
- name, err := anyMessageName(any)
- return string(name), err
-}
-func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
- if any == nil {
- return "", fmt.Errorf("message is nil")
- }
- name := protoreflect.FullName(any.TypeUrl)
- if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
- name = name[i+len("/"):]
- }
- if !name.IsValid() {
- return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
- }
- return name, nil
-}
-
-// MarshalAny marshals the given message m into an anypb.Any message.
-//
-// Deprecated: Call the anypb.New function instead.
-func MarshalAny(m proto.Message) (*anypb.Any, error) {
- switch dm := m.(type) {
- case DynamicAny:
- m = dm.Message
- case *DynamicAny:
- if dm == nil {
- return nil, proto.ErrNil
- }
- m = dm.Message
- }
- b, err := proto.Marshal(m)
- if err != nil {
- return nil, err
- }
- return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
-}
-
-// Empty returns a new message of the type specified in an anypb.Any message.
-// It returns protoregistry.NotFound if the corresponding message type could not
-// be resolved in the global registry.
-//
-// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
-// to resolve the message name and create a new instance of it.
-func Empty(any *anypb.Any) (proto.Message, error) {
- name, err := anyMessageName(any)
- if err != nil {
- return nil, err
- }
- mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
- if err != nil {
- return nil, err
- }
- return proto.MessageV1(mt.New().Interface()), nil
-}
-
-// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
-// into the provided message m. It returns an error if the target message
-// does not match the type in the Any message or if an unmarshal error occurs.
-//
-// The target message m may be a *DynamicAny message. If the underlying message
-// type could not be resolved, then this returns protoregistry.NotFound.
-//
-// Deprecated: Call the any.UnmarshalTo method instead.
-func UnmarshalAny(any *anypb.Any, m proto.Message) error {
- if dm, ok := m.(*DynamicAny); ok {
- if dm.Message == nil {
- var err error
- dm.Message, err = Empty(any)
- if err != nil {
- return err
- }
- }
- m = dm.Message
- }
-
- anyName, err := AnyMessageName(any)
- if err != nil {
- return err
- }
- msgName := proto.MessageName(m)
- if anyName != msgName {
- return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
- }
- return proto.Unmarshal(any.Value, m)
-}
-
-// Is reports whether the Any message contains a message of the specified type.
-//
-// Deprecated: Call the any.MessageIs method instead.
-func Is(any *anypb.Any, m proto.Message) bool {
- if any == nil || m == nil {
- return false
- }
- name := proto.MessageName(m)
- if !strings.HasSuffix(any.TypeUrl, name) {
- return false
- }
- return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
-}
-
-// DynamicAny is a value that can be passed to UnmarshalAny to automatically
-// allocate a proto.Message for the type specified in an anypb.Any message.
-// The allocated message is stored in the embedded proto.Message.
-//
-// Example:
-//
-// var x ptypes.DynamicAny
-// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
-// fmt.Printf("unmarshaled message: %v", x.Message)
-//
-// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
-// the any message contents into a new instance of the underlying message.
-type DynamicAny struct{ proto.Message }
-
-func (m DynamicAny) String() string {
- if m.Message == nil {
- return ""
- }
- return m.Message.String()
-}
-func (m DynamicAny) Reset() {
- if m.Message == nil {
- return
- }
- m.Message.Reset()
-}
-func (m DynamicAny) ProtoMessage() {
- return
-}
-func (m DynamicAny) ProtoReflect() protoreflect.Message {
- if m.Message == nil {
- return nil
- }
- return dynamicAny{proto.MessageReflect(m.Message)}
-}
-
-type dynamicAny struct{ protoreflect.Message }
-
-func (m dynamicAny) Type() protoreflect.MessageType {
- return dynamicAnyType{m.Message.Type()}
-}
-func (m dynamicAny) New() protoreflect.Message {
- return dynamicAnyType{m.Message.Type()}.New()
-}
-func (m dynamicAny) Interface() protoreflect.ProtoMessage {
- return DynamicAny{proto.MessageV1(m.Message.Interface())}
-}
-
-type dynamicAnyType struct{ protoreflect.MessageType }
-
-func (t dynamicAnyType) New() protoreflect.Message {
- return dynamicAny{t.MessageType.New()}
-}
-func (t dynamicAnyType) Zero() protoreflect.Message {
- return dynamicAny{t.MessageType.Zero()}
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
deleted file mode 100644
index d3c33259d2..0000000000
--- a/vendor/github.com/golang/protobuf/ptypes/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ptypes provides functionality for interacting with well-known types.
-//
-// Deprecated: Well-known types have specialized functionality directly
-// injected into the generated packages for each message type.
-// See the deprecation notice for each function for the suggested alternative.
-package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
deleted file mode 100644
index b2b55dd851..0000000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
- "errors"
- "fmt"
- "time"
-
- durationpb "github.com/golang/protobuf/ptypes/duration"
-)
-
-// Range of google.protobuf.Duration as specified in duration.proto.
-// This is about 10,000 years in seconds.
-const (
- maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
- minSeconds = -maxSeconds
-)
-
-// Duration converts a durationpb.Duration to a time.Duration.
-// Duration returns an error if dur is invalid or overflows a time.Duration.
-//
-// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
-func Duration(dur *durationpb.Duration) (time.Duration, error) {
- if err := validateDuration(dur); err != nil {
- return 0, err
- }
- d := time.Duration(dur.Seconds) * time.Second
- if int64(d/time.Second) != dur.Seconds {
- return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
- }
- if dur.Nanos != 0 {
- d += time.Duration(dur.Nanos) * time.Nanosecond
- if (d < 0) != (dur.Nanos < 0) {
- return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
- }
- }
- return d, nil
-}
-
-// DurationProto converts a time.Duration to a durationpb.Duration.
-//
-// Deprecated: Call the durationpb.New function instead.
-func DurationProto(d time.Duration) *durationpb.Duration {
- nanos := d.Nanoseconds()
- secs := nanos / 1e9
- nanos -= secs * 1e9
- return &durationpb.Duration{
- Seconds: int64(secs),
- Nanos: int32(nanos),
- }
-}
-
-// validateDuration determines whether the durationpb.Duration is valid
-// according to the definition in google/protobuf/duration.proto.
-// A valid durpb.Duration may still be too large to fit into a time.Duration
-// Note that the range of durationpb.Duration is about 10,000 years,
-// while the range of time.Duration is about 290 years.
-func validateDuration(dur *durationpb.Duration) error {
- if dur == nil {
- return errors.New("duration: nil Duration")
- }
- if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
- return fmt.Errorf("duration: %v: seconds out of range", dur)
- }
- if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
- return fmt.Errorf("duration: %v: nanos out of range", dur)
- }
- // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
- if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
- return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
- }
- return nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
deleted file mode 100644
index d0079ee3ef..0000000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/duration/duration.proto
-
-package duration
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- durationpb "google.golang.org/protobuf/types/known/durationpb"
- reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/duration.proto.
-
-type Duration = durationpb.Duration
-
-var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
- 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
- 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
- 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
- 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
-func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
- if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 0,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
- DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
- }.Build()
- File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
- file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
- file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
- file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
deleted file mode 100644
index 8368a3f70d..0000000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
- "errors"
- "fmt"
- "time"
-
- timestamppb "github.com/golang/protobuf/ptypes/timestamp"
-)
-
-// Range of google.protobuf.Duration as specified in timestamp.proto.
-const (
- // Seconds field of the earliest valid Timestamp.
- // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
- minValidSeconds = -62135596800
- // Seconds field just after the latest valid Timestamp.
- // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
- maxValidSeconds = 253402300800
-)
-
-// Timestamp converts a timestamppb.Timestamp to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return
-// value is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-//
-// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
-func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
- // Don't return the zero value on error, because corresponds to a valid
- // timestamp. Instead return whatever time.Unix gives us.
- var t time.Time
- if ts == nil {
- t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
- } else {
- t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
- }
- return t, validateTimestamp(ts)
-}
-
-// TimestampNow returns a google.protobuf.Timestamp for the current time.
-//
-// Deprecated: Call the timestamppb.Now function instead.
-func TimestampNow() *timestamppb.Timestamp {
- ts, err := TimestampProto(time.Now())
- if err != nil {
- panic("ptypes: time.Now() out of Timestamp range")
- }
- return ts
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-//
-// Deprecated: Call the timestamppb.New function instead.
-func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
- ts := ×tamppb.Timestamp{
- Seconds: t.Unix(),
- Nanos: int32(t.Nanosecond()),
- }
- if err := validateTimestamp(ts); err != nil {
- return nil, err
- }
- return ts, nil
-}
-
-// TimestampString returns the RFC 3339 string for valid Timestamps.
-// For invalid Timestamps, it returns an error message in parentheses.
-//
-// Deprecated: Call the ts.AsTime method instead,
-// followed by a call to the Format method on the time.Time value.
-func TimestampString(ts *timestamppb.Timestamp) string {
- t, err := Timestamp(ts)
- if err != nil {
- return fmt.Sprintf("(%v)", err)
- }
- return t.Format(time.RFC3339Nano)
-}
-
-// validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
-// and has a Nanos field in the range [0, 1e9).
-//
-// If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes the problem.
-//
-// Every valid Timestamp can be represented by a time.Time,
-// but the converse is not true.
-func validateTimestamp(ts *timestamppb.Timestamp) error {
- if ts == nil {
- return errors.New("timestamp: nil Timestamp")
- }
- if ts.Seconds < minValidSeconds {
- return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
- }
- if ts.Seconds >= maxValidSeconds {
- return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
- }
- if ts.Nanos < 0 || ts.Nanos >= 1e9 {
- return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
- }
- return nil
-}
diff --git a/vendor/github.com/google/gnostic-models/compiler/context.go b/vendor/github.com/google/gnostic-models/compiler/context.go
index 1bfe961219..26b31e51e3 100644
--- a/vendor/github.com/google/gnostic-models/compiler/context.go
+++ b/vendor/github.com/google/gnostic-models/compiler/context.go
@@ -15,7 +15,7 @@
package compiler
import (
- yaml "gopkg.in/yaml.v3"
+ yaml "go.yaml.in/yaml/v3"
)
// Context contains state of the compiler as it traverses a document.
diff --git a/vendor/github.com/google/gnostic-models/compiler/extensions.go b/vendor/github.com/google/gnostic-models/compiler/extensions.go
index 250c81e8c8..efa07f2a90 100644
--- a/vendor/github.com/google/gnostic-models/compiler/extensions.go
+++ b/vendor/github.com/google/gnostic-models/compiler/extensions.go
@@ -20,9 +20,9 @@ import (
"os/exec"
"strings"
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes/any"
- yaml "gopkg.in/yaml.v3"
+ yaml "go.yaml.in/yaml/v3"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/anypb"
extensions "github.com/google/gnostic-models/extensions"
)
@@ -33,7 +33,7 @@ type ExtensionHandler struct {
}
// CallExtension calls a binary extension handler.
-func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) {
+func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *anypb.Any, err error) {
if context == nil || context.ExtensionHandlers == nil {
return false, nil, nil
}
@@ -50,7 +50,7 @@ func CallExtension(context *Context, in *yaml.Node, extensionName string) (handl
return handled, response, err
}
-func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) {
+func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*anypb.Any, error) {
if extensionHandlers.Name != "" {
yamlData, _ := yaml.Marshal(in)
request := &extensions.ExtensionHandlerRequest{
diff --git a/vendor/github.com/google/gnostic-models/compiler/helpers.go b/vendor/github.com/google/gnostic-models/compiler/helpers.go
index 975d65e8f8..a83261eb6c 100644
--- a/vendor/github.com/google/gnostic-models/compiler/helpers.go
+++ b/vendor/github.com/google/gnostic-models/compiler/helpers.go
@@ -20,7 +20,7 @@ import (
"sort"
"strconv"
- "gopkg.in/yaml.v3"
+ yaml "go.yaml.in/yaml/v3"
"github.com/google/gnostic-models/jsonschema"
)
diff --git a/vendor/github.com/google/gnostic-models/compiler/reader.go b/vendor/github.com/google/gnostic-models/compiler/reader.go
index be0e8b40c8..da409d6b36 100644
--- a/vendor/github.com/google/gnostic-models/compiler/reader.go
+++ b/vendor/github.com/google/gnostic-models/compiler/reader.go
@@ -24,7 +24,7 @@ import (
"strings"
"sync"
- yaml "gopkg.in/yaml.v3"
+ yaml "go.yaml.in/yaml/v3"
)
var verboseReader = false
diff --git a/vendor/github.com/google/gnostic-models/extensions/extension.pb.go b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go
index a71df8abec..16c40d985f 100644
--- a/vendor/github.com/google/gnostic-models/extensions/extension.pb.go
+++ b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.27.1
-// protoc v3.19.3
+// protoc-gen-go v1.35.1
+// protoc v4.23.4
// source: extensions/extension.proto
package gnostic_extension_v1
@@ -51,11 +51,9 @@ type Version struct {
func (x *Version) Reset() {
*x = Version{}
- if protoimpl.UnsafeEnabled {
- mi := &file_extensions_extension_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_extensions_extension_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Version) String() string {
@@ -66,7 +64,7 @@ func (*Version) ProtoMessage() {}
func (x *Version) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -123,11 +121,9 @@ type ExtensionHandlerRequest struct {
func (x *ExtensionHandlerRequest) Reset() {
*x = ExtensionHandlerRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_extensions_extension_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_extensions_extension_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExtensionHandlerRequest) String() string {
@@ -138,7 +134,7 @@ func (*ExtensionHandlerRequest) ProtoMessage() {}
func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -191,11 +187,9 @@ type ExtensionHandlerResponse struct {
func (x *ExtensionHandlerResponse) Reset() {
*x = ExtensionHandlerResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_extensions_extension_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_extensions_extension_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExtensionHandlerResponse) String() string {
@@ -206,7 +200,7 @@ func (*ExtensionHandlerResponse) ProtoMessage() {}
func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -257,11 +251,9 @@ type Wrapper struct {
func (x *Wrapper) Reset() {
*x = Wrapper{}
- if protoimpl.UnsafeEnabled {
- mi := &file_extensions_extension_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_extensions_extension_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Wrapper) String() string {
@@ -272,7 +264,7 @@ func (*Wrapper) ProtoMessage() {}
func (x *Wrapper) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -367,7 +359,7 @@ func file_extensions_extension_proto_rawDescGZIP() []byte {
}
var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
-var file_extensions_extension_proto_goTypes = []interface{}{
+var file_extensions_extension_proto_goTypes = []any{
(*Version)(nil), // 0: gnostic.extension.v1.Version
(*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest
(*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse
@@ -390,56 +382,6 @@ func file_extensions_extension_proto_init() {
if File_extensions_extension_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Version); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ExtensionHandlerRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ExtensionHandlerResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Wrapper); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/github.com/google/gnostic-models/extensions/extensions.go b/vendor/github.com/google/gnostic-models/extensions/extensions.go
index ec8afd0092..0768163e5a 100644
--- a/vendor/github.com/google/gnostic-models/extensions/extensions.go
+++ b/vendor/github.com/google/gnostic-models/extensions/extensions.go
@@ -19,8 +19,8 @@ import (
"log"
"os"
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/anypb"
)
type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
@@ -54,7 +54,7 @@ func Main(handler extensionHandler) {
response.Errors = append(response.Errors, err.Error())
} else if handled {
response.Handled = true
- response.Value, err = ptypes.MarshalAny(output)
+ response.Value, err = anypb.New(output)
if err != nil {
response.Errors = append(response.Errors, err.Error())
}
diff --git a/vendor/github.com/google/gnostic-models/jsonschema/models.go b/vendor/github.com/google/gnostic-models/jsonschema/models.go
index 4781bdc5f5..a42b8e0035 100644
--- a/vendor/github.com/google/gnostic-models/jsonschema/models.go
+++ b/vendor/github.com/google/gnostic-models/jsonschema/models.go
@@ -16,7 +16,7 @@
// of JSON Schemas.
package jsonschema
-import "gopkg.in/yaml.v3"
+import "go.yaml.in/yaml/v3"
// The Schema struct models a JSON Schema and, because schemas are
// defined hierarchically, contains many references to itself.
diff --git a/vendor/github.com/google/gnostic-models/jsonschema/reader.go b/vendor/github.com/google/gnostic-models/jsonschema/reader.go
index b8583d4660..4f1fe0c08c 100644
--- a/vendor/github.com/google/gnostic-models/jsonschema/reader.go
+++ b/vendor/github.com/google/gnostic-models/jsonschema/reader.go
@@ -21,7 +21,7 @@ import (
"io/ioutil"
"strconv"
- "gopkg.in/yaml.v3"
+ yaml "go.yaml.in/yaml/v3"
)
// This is a global map of all known Schemas.
diff --git a/vendor/github.com/google/gnostic-models/jsonschema/writer.go b/vendor/github.com/google/gnostic-models/jsonschema/writer.go
index 340dc5f933..19f5ddeae2 100644
--- a/vendor/github.com/google/gnostic-models/jsonschema/writer.go
+++ b/vendor/github.com/google/gnostic-models/jsonschema/writer.go
@@ -17,7 +17,7 @@ package jsonschema
import (
"fmt"
- "gopkg.in/yaml.v3"
+ yaml "go.yaml.in/yaml/v3"
)
const indentation = " "
diff --git a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go
index d71fe6d545..de337d80c8 100644
--- a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go
+++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go
@@ -21,7 +21,7 @@ import (
"regexp"
"strings"
- "gopkg.in/yaml.v3"
+ yaml "go.yaml.in/yaml/v3"
"github.com/google/gnostic-models/compiler"
)
@@ -60,7 +60,7 @@ func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*Add
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem")
+ message := "contains an invalid AdditionalPropertiesItem"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -2543,7 +2543,7 @@ func NewNonBodyParameter(in *yaml.Node, context *compiler.Context) (*NonBodyPara
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid NonBodyParameter")
+ message := "contains an invalid NonBodyParameter"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -3271,7 +3271,7 @@ func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error)
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid Parameter")
+ message := "contains an invalid Parameter"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -3345,7 +3345,7 @@ func NewParametersItem(in *yaml.Node, context *compiler.Context) (*ParametersIte
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid ParametersItem")
+ message := "contains an invalid ParametersItem"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -4561,7 +4561,7 @@ func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue,
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid ResponseValue")
+ message := "contains an invalid ResponseValue"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -5030,7 +5030,7 @@ func NewSchemaItem(in *yaml.Node, context *compiler.Context) (*SchemaItem, error
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid SchemaItem")
+ message := "contains an invalid SchemaItem"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -5160,7 +5160,7 @@ func NewSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*Secu
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid SecurityDefinitionsItem")
+ message := "contains an invalid SecurityDefinitionsItem"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -6930,7 +6930,7 @@ func (m *BodyParameter) ToRawInfo() *yaml.Node {
// always include this required field.
info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
- if m.Required != false {
+ if m.Required {
info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
}
@@ -7149,7 +7149,7 @@ func (m *FileSchema) ToRawInfo() *yaml.Node {
// always include this required field.
info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
- if m.ReadOnly != false {
+ if m.ReadOnly {
info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly))
}
@@ -7176,7 +7176,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
if m == nil {
return info
}
- if m.Required != false {
+ if m.Required {
info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
}
@@ -7192,7 +7192,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
- if m.AllowEmptyValue != false {
+ if m.AllowEmptyValue {
info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue))
}
@@ -7220,7 +7220,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
- if m.ExclusiveMaximum != false {
+ if m.ExclusiveMaximum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
@@ -7228,7 +7228,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
- if m.ExclusiveMinimum != false {
+ if m.ExclusiveMinimum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
@@ -7252,7 +7252,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
- if m.UniqueItems != false {
+ if m.UniqueItems {
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
@@ -7306,7 +7306,7 @@ func (m *Header) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
- if m.ExclusiveMaximum != false {
+ if m.ExclusiveMaximum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
@@ -7314,7 +7314,7 @@ func (m *Header) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
- if m.ExclusiveMinimum != false {
+ if m.ExclusiveMinimum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
@@ -7338,7 +7338,7 @@ func (m *Header) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
- if m.UniqueItems != false {
+ if m.UniqueItems {
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
@@ -7373,7 +7373,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
if m == nil {
return info
}
- if m.Required != false {
+ if m.Required {
info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
}
@@ -7413,7 +7413,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
- if m.ExclusiveMaximum != false {
+ if m.ExclusiveMaximum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
@@ -7421,7 +7421,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
- if m.ExclusiveMinimum != false {
+ if m.ExclusiveMinimum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
@@ -7445,7 +7445,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
- if m.UniqueItems != false {
+ if m.UniqueItems {
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
@@ -7940,7 +7940,7 @@ func (m *Operation) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes"))
info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes))
}
- if m.Deprecated != false {
+ if m.Deprecated {
info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated))
}
@@ -8110,7 +8110,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
- if m.ExclusiveMaximum != false {
+ if m.ExclusiveMaximum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
@@ -8118,7 +8118,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
- if m.ExclusiveMinimum != false {
+ if m.ExclusiveMinimum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
@@ -8142,7 +8142,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
- if m.UniqueItems != false {
+ if m.UniqueItems {
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
@@ -8218,7 +8218,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
- if m.ExclusiveMaximum != false {
+ if m.ExclusiveMaximum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
@@ -8226,7 +8226,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
- if m.ExclusiveMinimum != false {
+ if m.ExclusiveMinimum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
@@ -8250,7 +8250,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
- if m.UniqueItems != false {
+ if m.UniqueItems {
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
@@ -8296,7 +8296,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
if m == nil {
return info
}
- if m.Required != false {
+ if m.Required {
info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
}
@@ -8312,7 +8312,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
- if m.AllowEmptyValue != false {
+ if m.AllowEmptyValue {
info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue))
}
@@ -8340,7 +8340,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
- if m.ExclusiveMaximum != false {
+ if m.ExclusiveMaximum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
@@ -8348,7 +8348,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
- if m.ExclusiveMinimum != false {
+ if m.ExclusiveMinimum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
@@ -8372,7 +8372,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
- if m.UniqueItems != false {
+ if m.UniqueItems {
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
@@ -8514,7 +8514,7 @@ func (m *Schema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
}
- if m.ExclusiveMaximum != false {
+ if m.ExclusiveMaximum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
}
@@ -8522,7 +8522,7 @@ func (m *Schema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
}
- if m.ExclusiveMinimum != false {
+ if m.ExclusiveMinimum {
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
}
@@ -8546,7 +8546,7 @@ func (m *Schema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
}
- if m.UniqueItems != false {
+ if m.UniqueItems {
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
}
@@ -8610,7 +8610,7 @@ func (m *Schema) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator"))
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator))
}
- if m.ReadOnly != false {
+ if m.ReadOnly {
info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly))
}
@@ -8796,11 +8796,11 @@ func (m *Xml) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix"))
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix))
}
- if m.Attribute != false {
+ if m.Attribute {
info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute))
}
- if m.Wrapped != false {
+ if m.Wrapped {
info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped"))
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped))
}
diff --git a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
index 65c4c913ce..3b930b3de2 100644
--- a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
+++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
@@ -16,8 +16,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.27.1
-// protoc v3.19.3
+// protoc-gen-go v1.35.1
+// protoc v4.23.4
// source: openapiv2/OpenAPIv2.proto
package openapi_v2
@@ -43,6 +43,7 @@ type AdditionalPropertiesItem struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *AdditionalPropertiesItem_Schema
// *AdditionalPropertiesItem_Boolean
Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"`
@@ -50,11 +51,9 @@ type AdditionalPropertiesItem struct {
func (x *AdditionalPropertiesItem) Reset() {
*x = AdditionalPropertiesItem{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AdditionalPropertiesItem) String() string {
@@ -65,7 +64,7 @@ func (*AdditionalPropertiesItem) ProtoMessage() {}
func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -128,11 +127,9 @@ type Any struct {
func (x *Any) Reset() {
*x = Any{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Any) String() string {
@@ -143,7 +140,7 @@ func (*Any) ProtoMessage() {}
func (x *Any) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -186,11 +183,9 @@ type ApiKeySecurity struct {
func (x *ApiKeySecurity) Reset() {
*x = ApiKeySecurity{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ApiKeySecurity) String() string {
@@ -201,7 +196,7 @@ func (*ApiKeySecurity) ProtoMessage() {}
func (x *ApiKeySecurity) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -263,11 +258,9 @@ type BasicAuthenticationSecurity struct {
func (x *BasicAuthenticationSecurity) Reset() {
*x = BasicAuthenticationSecurity{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BasicAuthenticationSecurity) String() string {
@@ -278,7 +271,7 @@ func (*BasicAuthenticationSecurity) ProtoMessage() {}
func (x *BasicAuthenticationSecurity) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -333,11 +326,9 @@ type BodyParameter struct {
func (x *BodyParameter) Reset() {
*x = BodyParameter{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BodyParameter) String() string {
@@ -348,7 +339,7 @@ func (*BodyParameter) ProtoMessage() {}
func (x *BodyParameter) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -422,11 +413,9 @@ type Contact struct {
func (x *Contact) Reset() {
*x = Contact{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Contact) String() string {
@@ -437,7 +426,7 @@ func (*Contact) ProtoMessage() {}
func (x *Contact) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -490,11 +479,9 @@ type Default struct {
func (x *Default) Reset() {
*x = Default{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Default) String() string {
@@ -505,7 +492,7 @@ func (*Default) ProtoMessage() {}
func (x *Default) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -538,11 +525,9 @@ type Definitions struct {
func (x *Definitions) Reset() {
*x = Definitions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Definitions) String() string {
@@ -553,7 +538,7 @@ func (*Definitions) ProtoMessage() {}
func (x *Definitions) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -606,11 +591,9 @@ type Document struct {
func (x *Document) Reset() {
*x = Document{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Document) String() string {
@@ -621,7 +604,7 @@ func (*Document) ProtoMessage() {}
func (x *Document) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -758,11 +741,9 @@ type Examples struct {
func (x *Examples) Reset() {
*x = Examples{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Examples) String() string {
@@ -773,7 +754,7 @@ func (*Examples) ProtoMessage() {}
func (x *Examples) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -808,11 +789,9 @@ type ExternalDocs struct {
func (x *ExternalDocs) Reset() {
*x = ExternalDocs{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExternalDocs) String() string {
@@ -823,7 +802,7 @@ func (*ExternalDocs) ProtoMessage() {}
func (x *ExternalDocs) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -879,11 +858,9 @@ type FileSchema struct {
func (x *FileSchema) Reset() {
*x = FileSchema{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FileSchema) String() string {
@@ -894,7 +871,7 @@ func (*FileSchema) ProtoMessage() {}
func (x *FileSchema) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1016,11 +993,9 @@ type FormDataParameterSubSchema struct {
func (x *FormDataParameterSubSchema) Reset() {
*x = FormDataParameterSubSchema{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FormDataParameterSubSchema) String() string {
@@ -1031,7 +1006,7 @@ func (*FormDataParameterSubSchema) ProtoMessage() {}
func (x *FormDataParameterSubSchema) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1235,11 +1210,9 @@ type Header struct {
func (x *Header) Reset() {
*x = Header{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Header) String() string {
@@ -1250,7 +1223,7 @@ func (*Header) ProtoMessage() {}
func (x *Header) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1433,11 +1406,9 @@ type HeaderParameterSubSchema struct {
func (x *HeaderParameterSubSchema) Reset() {
*x = HeaderParameterSubSchema{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *HeaderParameterSubSchema) String() string {
@@ -1448,7 +1419,7 @@ func (*HeaderParameterSubSchema) ProtoMessage() {}
func (x *HeaderParameterSubSchema) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1627,11 +1598,9 @@ type Headers struct {
func (x *Headers) Reset() {
*x = Headers{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Headers) String() string {
@@ -1642,7 +1611,7 @@ func (*Headers) ProtoMessage() {}
func (x *Headers) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1685,11 +1654,9 @@ type Info struct {
func (x *Info) Reset() {
*x = Info{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Info) String() string {
@@ -1700,7 +1667,7 @@ func (*Info) ProtoMessage() {}
func (x *Info) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1774,11 +1741,9 @@ type ItemsItem struct {
func (x *ItemsItem) Reset() {
*x = ItemsItem{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ItemsItem) String() string {
@@ -1789,7 +1754,7 @@ func (*ItemsItem) ProtoMessage() {}
func (x *ItemsItem) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1822,11 +1787,9 @@ type JsonReference struct {
func (x *JsonReference) Reset() {
*x = JsonReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *JsonReference) String() string {
@@ -1837,7 +1800,7 @@ func (*JsonReference) ProtoMessage() {}
func (x *JsonReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1880,11 +1843,9 @@ type License struct {
func (x *License) Reset() {
*x = License{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *License) String() string {
@@ -1895,7 +1856,7 @@ func (*License) ProtoMessage() {}
func (x *License) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1945,11 +1906,9 @@ type NamedAny struct {
func (x *NamedAny) Reset() {
*x = NamedAny{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedAny) String() string {
@@ -1960,7 +1919,7 @@ func (*NamedAny) ProtoMessage() {}
func (x *NamedAny) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2003,11 +1962,9 @@ type NamedHeader struct {
func (x *NamedHeader) Reset() {
*x = NamedHeader{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedHeader) String() string {
@@ -2018,7 +1975,7 @@ func (*NamedHeader) ProtoMessage() {}
func (x *NamedHeader) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2061,11 +2018,9 @@ type NamedParameter struct {
func (x *NamedParameter) Reset() {
*x = NamedParameter{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedParameter) String() string {
@@ -2076,7 +2031,7 @@ func (*NamedParameter) ProtoMessage() {}
func (x *NamedParameter) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2119,11 +2074,9 @@ type NamedPathItem struct {
func (x *NamedPathItem) Reset() {
*x = NamedPathItem{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedPathItem) String() string {
@@ -2134,7 +2087,7 @@ func (*NamedPathItem) ProtoMessage() {}
func (x *NamedPathItem) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2177,11 +2130,9 @@ type NamedResponse struct {
func (x *NamedResponse) Reset() {
*x = NamedResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedResponse) String() string {
@@ -2192,7 +2143,7 @@ func (*NamedResponse) ProtoMessage() {}
func (x *NamedResponse) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2235,11 +2186,9 @@ type NamedResponseValue struct {
func (x *NamedResponseValue) Reset() {
*x = NamedResponseValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedResponseValue) String() string {
@@ -2250,7 +2199,7 @@ func (*NamedResponseValue) ProtoMessage() {}
func (x *NamedResponseValue) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2293,11 +2242,9 @@ type NamedSchema struct {
func (x *NamedSchema) Reset() {
*x = NamedSchema{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedSchema) String() string {
@@ -2308,7 +2255,7 @@ func (*NamedSchema) ProtoMessage() {}
func (x *NamedSchema) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2351,11 +2298,9 @@ type NamedSecurityDefinitionsItem struct {
func (x *NamedSecurityDefinitionsItem) Reset() {
*x = NamedSecurityDefinitionsItem{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedSecurityDefinitionsItem) String() string {
@@ -2366,7 +2311,7 @@ func (*NamedSecurityDefinitionsItem) ProtoMessage() {}
func (x *NamedSecurityDefinitionsItem) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2409,11 +2354,9 @@ type NamedString struct {
func (x *NamedString) Reset() {
*x = NamedString{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedString) String() string {
@@ -2424,7 +2367,7 @@ func (*NamedString) ProtoMessage() {}
func (x *NamedString) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2467,11 +2410,9 @@ type NamedStringArray struct {
func (x *NamedStringArray) Reset() {
*x = NamedStringArray{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedStringArray) String() string {
@@ -2482,7 +2423,7 @@ func (*NamedStringArray) ProtoMessage() {}
func (x *NamedStringArray) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2517,6 +2458,7 @@ type NonBodyParameter struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *NonBodyParameter_HeaderParameterSubSchema
// *NonBodyParameter_FormDataParameterSubSchema
// *NonBodyParameter_QueryParameterSubSchema
@@ -2526,11 +2468,9 @@ type NonBodyParameter struct {
func (x *NonBodyParameter) Reset() {
*x = NonBodyParameter{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NonBodyParameter) String() string {
@@ -2541,7 +2481,7 @@ func (*NonBodyParameter) ProtoMessage() {}
func (x *NonBodyParameter) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2635,11 +2575,9 @@ type Oauth2AccessCodeSecurity struct {
func (x *Oauth2AccessCodeSecurity) Reset() {
*x = Oauth2AccessCodeSecurity{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Oauth2AccessCodeSecurity) String() string {
@@ -2650,7 +2588,7 @@ func (*Oauth2AccessCodeSecurity) ProtoMessage() {}
func (x *Oauth2AccessCodeSecurity) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2729,11 +2667,9 @@ type Oauth2ApplicationSecurity struct {
func (x *Oauth2ApplicationSecurity) Reset() {
*x = Oauth2ApplicationSecurity{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Oauth2ApplicationSecurity) String() string {
@@ -2744,7 +2680,7 @@ func (*Oauth2ApplicationSecurity) ProtoMessage() {}
func (x *Oauth2ApplicationSecurity) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2816,11 +2752,9 @@ type Oauth2ImplicitSecurity struct {
func (x *Oauth2ImplicitSecurity) Reset() {
*x = Oauth2ImplicitSecurity{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Oauth2ImplicitSecurity) String() string {
@@ -2831,7 +2765,7 @@ func (*Oauth2ImplicitSecurity) ProtoMessage() {}
func (x *Oauth2ImplicitSecurity) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2903,11 +2837,9 @@ type Oauth2PasswordSecurity struct {
func (x *Oauth2PasswordSecurity) Reset() {
*x = Oauth2PasswordSecurity{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Oauth2PasswordSecurity) String() string {
@@ -2918,7 +2850,7 @@ func (*Oauth2PasswordSecurity) ProtoMessage() {}
func (x *Oauth2PasswordSecurity) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2985,11 +2917,9 @@ type Oauth2Scopes struct {
func (x *Oauth2Scopes) Reset() {
*x = Oauth2Scopes{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Oauth2Scopes) String() string {
@@ -3000,7 +2930,7 @@ func (*Oauth2Scopes) ProtoMessage() {}
func (x *Oauth2Scopes) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3051,11 +2981,9 @@ type Operation struct {
func (x *Operation) Reset() {
*x = Operation{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Operation) String() string {
@@ -3066,7 +2994,7 @@ func (*Operation) ProtoMessage() {}
func (x *Operation) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3178,6 +3106,7 @@ type Parameter struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *Parameter_BodyParameter
// *Parameter_NonBodyParameter
Oneof isParameter_Oneof `protobuf_oneof:"oneof"`
@@ -3185,11 +3114,9 @@ type Parameter struct {
func (x *Parameter) Reset() {
*x = Parameter{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Parameter) String() string {
@@ -3200,7 +3127,7 @@ func (*Parameter) ProtoMessage() {}
func (x *Parameter) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3263,11 +3190,9 @@ type ParameterDefinitions struct {
func (x *ParameterDefinitions) Reset() {
*x = ParameterDefinitions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ParameterDefinitions) String() string {
@@ -3278,7 +3203,7 @@ func (*ParameterDefinitions) ProtoMessage() {}
func (x *ParameterDefinitions) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3306,6 +3231,7 @@ type ParametersItem struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *ParametersItem_Parameter
// *ParametersItem_JsonReference
Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"`
@@ -3313,11 +3239,9 @@ type ParametersItem struct {
func (x *ParametersItem) Reset() {
*x = ParametersItem{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ParametersItem) String() string {
@@ -3328,7 +3252,7 @@ func (*ParametersItem) ProtoMessage() {}
func (x *ParametersItem) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3400,11 +3324,9 @@ type PathItem struct {
func (x *PathItem) Reset() {
*x = PathItem{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *PathItem) String() string {
@@ -3415,7 +3337,7 @@ func (*PathItem) ProtoMessage() {}
func (x *PathItem) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3535,11 +3457,9 @@ type PathParameterSubSchema struct {
func (x *PathParameterSubSchema) Reset() {
*x = PathParameterSubSchema{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *PathParameterSubSchema) String() string {
@@ -3550,7 +3470,7 @@ func (*PathParameterSubSchema) ProtoMessage() {}
func (x *PathParameterSubSchema) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3731,11 +3651,9 @@ type Paths struct {
func (x *Paths) Reset() {
*x = Paths{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Paths) String() string {
@@ -3746,7 +3664,7 @@ func (*Paths) ProtoMessage() {}
func (x *Paths) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3802,11 +3720,9 @@ type PrimitivesItems struct {
func (x *PrimitivesItems) Reset() {
*x = PrimitivesItems{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *PrimitivesItems) String() string {
@@ -3817,7 +3733,7 @@ func (*PrimitivesItems) ProtoMessage() {}
func (x *PrimitivesItems) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3968,11 +3884,9 @@ type Properties struct {
func (x *Properties) Reset() {
*x = Properties{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Properties) String() string {
@@ -3983,7 +3897,7 @@ func (*Properties) ProtoMessage() {}
func (x *Properties) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4042,11 +3956,9 @@ type QueryParameterSubSchema struct {
func (x *QueryParameterSubSchema) Reset() {
*x = QueryParameterSubSchema{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *QueryParameterSubSchema) String() string {
@@ -4057,7 +3969,7 @@ func (*QueryParameterSubSchema) ProtoMessage() {}
func (x *QueryParameterSubSchema) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4247,11 +4159,9 @@ type Response struct {
func (x *Response) Reset() {
*x = Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Response) String() string {
@@ -4262,7 +4172,7 @@ func (*Response) ProtoMessage() {}
func (x *Response) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4323,11 +4233,9 @@ type ResponseDefinitions struct {
func (x *ResponseDefinitions) Reset() {
*x = ResponseDefinitions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ResponseDefinitions) String() string {
@@ -4338,7 +4246,7 @@ func (*ResponseDefinitions) ProtoMessage() {}
func (x *ResponseDefinitions) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4366,6 +4274,7 @@ type ResponseValue struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *ResponseValue_Response
// *ResponseValue_JsonReference
Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"`
@@ -4373,11 +4282,9 @@ type ResponseValue struct {
func (x *ResponseValue) Reset() {
*x = ResponseValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ResponseValue) String() string {
@@ -4388,7 +4295,7 @@ func (*ResponseValue) ProtoMessage() {}
func (x *ResponseValue) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4452,11 +4359,9 @@ type Responses struct {
func (x *Responses) Reset() {
*x = Responses{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Responses) String() string {
@@ -4467,7 +4372,7 @@ func (*Responses) ProtoMessage() {}
func (x *Responses) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4537,11 +4442,9 @@ type Schema struct {
func (x *Schema) Reset() {
*x = Schema{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Schema) String() string {
@@ -4552,7 +4455,7 @@ func (*Schema) ProtoMessage() {}
func (x *Schema) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4790,6 +4693,7 @@ type SchemaItem struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *SchemaItem_Schema
// *SchemaItem_FileSchema
Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"`
@@ -4797,11 +4701,9 @@ type SchemaItem struct {
func (x *SchemaItem) Reset() {
*x = SchemaItem{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SchemaItem) String() string {
@@ -4812,7 +4714,7 @@ func (*SchemaItem) ProtoMessage() {}
func (x *SchemaItem) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4874,11 +4776,9 @@ type SecurityDefinitions struct {
func (x *SecurityDefinitions) Reset() {
*x = SecurityDefinitions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SecurityDefinitions) String() string {
@@ -4889,7 +4789,7 @@ func (*SecurityDefinitions) ProtoMessage() {}
func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4917,6 +4817,7 @@ type SecurityDefinitionsItem struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *SecurityDefinitionsItem_BasicAuthenticationSecurity
// *SecurityDefinitionsItem_ApiKeySecurity
// *SecurityDefinitionsItem_Oauth2ImplicitSecurity
@@ -4928,11 +4829,9 @@ type SecurityDefinitionsItem struct {
func (x *SecurityDefinitionsItem) Reset() {
*x = SecurityDefinitionsItem{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SecurityDefinitionsItem) String() string {
@@ -4943,7 +4842,7 @@ func (*SecurityDefinitionsItem) ProtoMessage() {}
func (x *SecurityDefinitionsItem) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5057,11 +4956,9 @@ type SecurityRequirement struct {
func (x *SecurityRequirement) Reset() {
*x = SecurityRequirement{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SecurityRequirement) String() string {
@@ -5072,7 +4969,7 @@ func (*SecurityRequirement) ProtoMessage() {}
func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5104,11 +5001,9 @@ type StringArray struct {
func (x *StringArray) Reset() {
*x = StringArray{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *StringArray) String() string {
@@ -5119,7 +5014,7 @@ func (*StringArray) ProtoMessage() {}
func (x *StringArray) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5154,11 +5049,9 @@ type Tag struct {
func (x *Tag) Reset() {
*x = Tag{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Tag) String() string {
@@ -5169,7 +5062,7 @@ func (*Tag) ProtoMessage() {}
func (x *Tag) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5222,11 +5115,9 @@ type TypeItem struct {
func (x *TypeItem) Reset() {
*x = TypeItem{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *TypeItem) String() string {
@@ -5237,7 +5128,7 @@ func (*TypeItem) ProtoMessage() {}
func (x *TypeItem) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5270,11 +5161,9 @@ type VendorExtension struct {
func (x *VendorExtension) Reset() {
*x = VendorExtension{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *VendorExtension) String() string {
@@ -5285,7 +5174,7 @@ func (*VendorExtension) ProtoMessage() {}
func (x *VendorExtension) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5322,11 +5211,9 @@ type Xml struct {
func (x *Xml) Reset() {
*x = Xml{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Xml) String() string {
@@ -5337,7 +5224,7 @@ func (*Xml) ProtoMessage() {}
func (x *Xml) ProtoReflect() protoreflect.Message {
mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6356,7 +6243,7 @@ func file_openapiv2_OpenAPIv2_proto_rawDescGZIP() []byte {
}
var file_openapiv2_OpenAPIv2_proto_msgTypes = make([]protoimpl.MessageInfo, 60)
-var file_openapiv2_OpenAPIv2_proto_goTypes = []interface{}{
+var file_openapiv2_OpenAPIv2_proto_goTypes = []any{
(*AdditionalPropertiesItem)(nil), // 0: openapi.v2.AdditionalPropertiesItem
(*Any)(nil), // 1: openapi.v2.Any
(*ApiKeySecurity)(nil), // 2: openapi.v2.ApiKeySecurity
@@ -6565,755 +6452,33 @@ func file_openapiv2_OpenAPIv2_proto_init() {
if File_openapiv2_OpenAPIv2_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_openapiv2_OpenAPIv2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AdditionalPropertiesItem); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Any); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ApiKeySecurity); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BasicAuthenticationSecurity); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BodyParameter); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Contact); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Default); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Definitions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Document); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Examples); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ExternalDocs); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FileSchema); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FormDataParameterSubSchema); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Header); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HeaderParameterSubSchema); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Headers); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Info); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ItemsItem); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*JsonReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*License); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedAny); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedHeader); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedParameter); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedPathItem); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedResponseValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedSchema); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedSecurityDefinitionsItem); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedString); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedStringArray); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NonBodyParameter); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Oauth2AccessCodeSecurity); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Oauth2ApplicationSecurity); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Oauth2ImplicitSecurity); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Oauth2PasswordSecurity); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Oauth2Scopes); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Operation); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Parameter); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ParameterDefinitions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ParametersItem); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PathItem); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PathParameterSubSchema); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Paths); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PrimitivesItems); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Properties); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*QueryParameterSubSchema); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ResponseDefinitions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ResponseValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Responses); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Schema); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SchemaItem); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SecurityDefinitions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SecurityDefinitionsItem); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SecurityRequirement); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StringArray); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Tag); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TypeItem); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*VendorExtension); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Xml); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []interface{}{
+ file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []any{
(*AdditionalPropertiesItem_Schema)(nil),
(*AdditionalPropertiesItem_Boolean)(nil),
}
- file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []interface{}{
+ file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []any{
(*NonBodyParameter_HeaderParameterSubSchema)(nil),
(*NonBodyParameter_FormDataParameterSubSchema)(nil),
(*NonBodyParameter_QueryParameterSubSchema)(nil),
(*NonBodyParameter_PathParameterSubSchema)(nil),
}
- file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []interface{}{
+ file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []any{
(*Parameter_BodyParameter)(nil),
(*Parameter_NonBodyParameter)(nil),
}
- file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []interface{}{
+ file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []any{
(*ParametersItem_Parameter)(nil),
(*ParametersItem_JsonReference)(nil),
}
- file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = []interface{}{
+ file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = []any{
(*ResponseValue_Response)(nil),
(*ResponseValue_JsonReference)(nil),
}
- file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []interface{}{
+ file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []any{
(*SchemaItem_Schema)(nil),
(*SchemaItem_FileSchema)(nil),
}
- file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []interface{}{
+ file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []any{
(*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil),
(*SecurityDefinitionsItem_ApiKeySecurity)(nil),
(*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil),
diff --git a/vendor/github.com/google/gnostic-models/openapiv2/document.go b/vendor/github.com/google/gnostic-models/openapiv2/document.go
index e96ac0d6da..89469a13ed 100644
--- a/vendor/github.com/google/gnostic-models/openapiv2/document.go
+++ b/vendor/github.com/google/gnostic-models/openapiv2/document.go
@@ -15,7 +15,7 @@
package openapi_v2
import (
- "gopkg.in/yaml.v3"
+ yaml "go.yaml.in/yaml/v3"
"github.com/google/gnostic-models/compiler"
)
diff --git a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go
index 4b1131ce1c..662772dd95 100644
--- a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go
+++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go
@@ -21,7 +21,7 @@ import (
"regexp"
"strings"
- "gopkg.in/yaml.v3"
+ yaml "go.yaml.in/yaml/v3"
"github.com/google/gnostic-models/compiler"
)
@@ -60,7 +60,7 @@ func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*Add
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem")
+ message := "contains an invalid AdditionalPropertiesItem"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -113,7 +113,7 @@ func NewAnyOrExpression(in *yaml.Node, context *compiler.Context) (*AnyOrExpress
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid AnyOrExpression")
+ message := "contains an invalid AnyOrExpression"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -227,7 +227,7 @@ func NewCallbackOrReference(in *yaml.Node, context *compiler.Context) (*Callback
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid CallbackOrReference")
+ message := "contains an invalid CallbackOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -979,7 +979,7 @@ func NewExampleOrReference(in *yaml.Node, context *compiler.Context) (*ExampleOr
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid ExampleOrReference")
+ message := "contains an invalid ExampleOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -1320,7 +1320,7 @@ func NewHeaderOrReference(in *yaml.Node, context *compiler.Context) (*HeaderOrRe
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid HeaderOrReference")
+ message := "contains an invalid HeaderOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -1713,7 +1713,7 @@ func NewLinkOrReference(in *yaml.Node, context *compiler.Context) (*LinkOrRefere
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid LinkOrReference")
+ message := "contains an invalid LinkOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -3090,7 +3090,7 @@ func NewParameterOrReference(in *yaml.Node, context *compiler.Context) (*Paramet
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid ParameterOrReference")
+ message := "contains an invalid ParameterOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -3606,7 +3606,7 @@ func NewRequestBodyOrReference(in *yaml.Node, context *compiler.Context) (*Reque
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid RequestBodyOrReference")
+ message := "contains an invalid RequestBodyOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -3743,7 +3743,7 @@ func NewResponseOrReference(in *yaml.Node, context *compiler.Context) (*Response
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid ResponseOrReference")
+ message := "contains an invalid ResponseOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -4310,7 +4310,7 @@ func NewSchemaOrReference(in *yaml.Node, context *compiler.Context) (*SchemaOrRe
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid SchemaOrReference")
+ message := "contains an invalid SchemaOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
@@ -4543,7 +4543,7 @@ func NewSecuritySchemeOrReference(in *yaml.Node, context *compiler.Context) (*Se
// since the oneof matched one of its possibilities, discard any matching errors
errors = make([]error, 0)
} else {
- message := fmt.Sprintf("contains an invalid SecuritySchemeOrReference")
+ message := "contains an invalid SecuritySchemeOrReference"
err := compiler.NewError(context, message)
errors = []error{err}
}
diff --git a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
index 945b8d11ff..b9df95a379 100644
--- a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
+++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
@@ -16,8 +16,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.27.1
-// protoc v3.19.3
+// protoc-gen-go v1.35.1
+// protoc v4.23.4
// source: openapiv3/OpenAPIv3.proto
package openapi_v3
@@ -43,6 +43,7 @@ type AdditionalPropertiesItem struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *AdditionalPropertiesItem_SchemaOrReference
// *AdditionalPropertiesItem_Boolean
Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"`
@@ -50,11 +51,9 @@ type AdditionalPropertiesItem struct {
func (x *AdditionalPropertiesItem) Reset() {
*x = AdditionalPropertiesItem{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AdditionalPropertiesItem) String() string {
@@ -65,7 +64,7 @@ func (*AdditionalPropertiesItem) ProtoMessage() {}
func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -128,11 +127,9 @@ type Any struct {
func (x *Any) Reset() {
*x = Any{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Any) String() string {
@@ -143,7 +140,7 @@ func (*Any) ProtoMessage() {}
func (x *Any) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -178,6 +175,7 @@ type AnyOrExpression struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *AnyOrExpression_Any
// *AnyOrExpression_Expression
Oneof isAnyOrExpression_Oneof `protobuf_oneof:"oneof"`
@@ -185,11 +183,9 @@ type AnyOrExpression struct {
func (x *AnyOrExpression) Reset() {
*x = AnyOrExpression{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AnyOrExpression) String() string {
@@ -200,7 +196,7 @@ func (*AnyOrExpression) ProtoMessage() {}
func (x *AnyOrExpression) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -264,11 +260,9 @@ type Callback struct {
func (x *Callback) Reset() {
*x = Callback{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Callback) String() string {
@@ -279,7 +273,7 @@ func (*Callback) ProtoMessage() {}
func (x *Callback) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -314,6 +308,7 @@ type CallbackOrReference struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *CallbackOrReference_Callback
// *CallbackOrReference_Reference
Oneof isCallbackOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -321,11 +316,9 @@ type CallbackOrReference struct {
func (x *CallbackOrReference) Reset() {
*x = CallbackOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CallbackOrReference) String() string {
@@ -336,7 +329,7 @@ func (*CallbackOrReference) ProtoMessage() {}
func (x *CallbackOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -398,11 +391,9 @@ type CallbacksOrReferences struct {
func (x *CallbacksOrReferences) Reset() {
*x = CallbacksOrReferences{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CallbacksOrReferences) String() string {
@@ -413,7 +404,7 @@ func (*CallbacksOrReferences) ProtoMessage() {}
func (x *CallbacksOrReferences) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -455,11 +446,9 @@ type Components struct {
func (x *Components) Reset() {
*x = Components{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Components) String() string {
@@ -470,7 +459,7 @@ func (*Components) ProtoMessage() {}
func (x *Components) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -569,11 +558,9 @@ type Contact struct {
func (x *Contact) Reset() {
*x = Contact{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Contact) String() string {
@@ -584,7 +571,7 @@ func (*Contact) ProtoMessage() {}
func (x *Contact) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -633,6 +620,7 @@ type DefaultType struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *DefaultType_Number
// *DefaultType_Boolean
// *DefaultType_String_
@@ -641,11 +629,9 @@ type DefaultType struct {
func (x *DefaultType) Reset() {
*x = DefaultType{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DefaultType) String() string {
@@ -656,7 +642,7 @@ func (*DefaultType) ProtoMessage() {}
func (x *DefaultType) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -734,11 +720,9 @@ type Discriminator struct {
func (x *Discriminator) Reset() {
*x = Discriminator{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Discriminator) String() string {
@@ -749,7 +733,7 @@ func (*Discriminator) ProtoMessage() {}
func (x *Discriminator) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -803,11 +787,9 @@ type Document struct {
func (x *Document) Reset() {
*x = Document{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Document) String() string {
@@ -818,7 +800,7 @@ func (*Document) ProtoMessage() {}
func (x *Document) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -912,11 +894,9 @@ type Encoding struct {
func (x *Encoding) Reset() {
*x = Encoding{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Encoding) String() string {
@@ -927,7 +907,7 @@ func (*Encoding) ProtoMessage() {}
func (x *Encoding) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -994,11 +974,9 @@ type Encodings struct {
func (x *Encodings) Reset() {
*x = Encodings{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Encodings) String() string {
@@ -1009,7 +987,7 @@ func (*Encodings) ProtoMessage() {}
func (x *Encodings) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1045,11 +1023,9 @@ type Example struct {
func (x *Example) Reset() {
*x = Example{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Example) String() string {
@@ -1060,7 +1036,7 @@ func (*Example) ProtoMessage() {}
func (x *Example) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1116,6 +1092,7 @@ type ExampleOrReference struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *ExampleOrReference_Example
// *ExampleOrReference_Reference
Oneof isExampleOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -1123,11 +1100,9 @@ type ExampleOrReference struct {
func (x *ExampleOrReference) Reset() {
*x = ExampleOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExampleOrReference) String() string {
@@ -1138,7 +1113,7 @@ func (*ExampleOrReference) ProtoMessage() {}
func (x *ExampleOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1200,11 +1175,9 @@ type ExamplesOrReferences struct {
func (x *ExamplesOrReferences) Reset() {
*x = ExamplesOrReferences{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExamplesOrReferences) String() string {
@@ -1215,7 +1188,7 @@ func (*ExamplesOrReferences) ProtoMessage() {}
func (x *ExamplesOrReferences) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1247,11 +1220,9 @@ type Expression struct {
func (x *Expression) Reset() {
*x = Expression{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Expression) String() string {
@@ -1262,7 +1233,7 @@ func (*Expression) ProtoMessage() {}
func (x *Expression) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1297,11 +1268,9 @@ type ExternalDocs struct {
func (x *ExternalDocs) Reset() {
*x = ExternalDocs{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExternalDocs) String() string {
@@ -1312,7 +1281,7 @@ func (*ExternalDocs) ProtoMessage() {}
func (x *ExternalDocs) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1370,11 +1339,9 @@ type Header struct {
func (x *Header) Reset() {
*x = Header{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Header) String() string {
@@ -1385,7 +1352,7 @@ func (*Header) ProtoMessage() {}
func (x *Header) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1490,6 +1457,7 @@ type HeaderOrReference struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *HeaderOrReference_Header
// *HeaderOrReference_Reference
Oneof isHeaderOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -1497,11 +1465,9 @@ type HeaderOrReference struct {
func (x *HeaderOrReference) Reset() {
*x = HeaderOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *HeaderOrReference) String() string {
@@ -1512,7 +1478,7 @@ func (*HeaderOrReference) ProtoMessage() {}
func (x *HeaderOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1574,11 +1540,9 @@ type HeadersOrReferences struct {
func (x *HeadersOrReferences) Reset() {
*x = HeadersOrReferences{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *HeadersOrReferences) String() string {
@@ -1589,7 +1553,7 @@ func (*HeadersOrReferences) ProtoMessage() {}
func (x *HeadersOrReferences) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1629,11 +1593,9 @@ type Info struct {
func (x *Info) Reset() {
*x = Info{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Info) String() string {
@@ -1644,7 +1606,7 @@ func (*Info) ProtoMessage() {}
func (x *Info) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1725,11 +1687,9 @@ type ItemsItem struct {
func (x *ItemsItem) Reset() {
*x = ItemsItem{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ItemsItem) String() string {
@@ -1740,7 +1700,7 @@ func (*ItemsItem) ProtoMessage() {}
func (x *ItemsItem) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1775,11 +1735,9 @@ type License struct {
func (x *License) Reset() {
*x = License{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *License) String() string {
@@ -1790,7 +1748,7 @@ func (*License) ProtoMessage() {}
func (x *License) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1843,11 +1801,9 @@ type Link struct {
func (x *Link) Reset() {
*x = Link{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Link) String() string {
@@ -1858,7 +1814,7 @@ func (*Link) ProtoMessage() {}
func (x *Link) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1928,6 +1884,7 @@ type LinkOrReference struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *LinkOrReference_Link
// *LinkOrReference_Reference
Oneof isLinkOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -1935,11 +1892,9 @@ type LinkOrReference struct {
func (x *LinkOrReference) Reset() {
*x = LinkOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *LinkOrReference) String() string {
@@ -1950,7 +1905,7 @@ func (*LinkOrReference) ProtoMessage() {}
func (x *LinkOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2012,11 +1967,9 @@ type LinksOrReferences struct {
func (x *LinksOrReferences) Reset() {
*x = LinksOrReferences{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *LinksOrReferences) String() string {
@@ -2027,7 +1980,7 @@ func (*LinksOrReferences) ProtoMessage() {}
func (x *LinksOrReferences) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2064,11 +2017,9 @@ type MediaType struct {
func (x *MediaType) Reset() {
*x = MediaType{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MediaType) String() string {
@@ -2079,7 +2030,7 @@ func (*MediaType) ProtoMessage() {}
func (x *MediaType) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2139,11 +2090,9 @@ type MediaTypes struct {
func (x *MediaTypes) Reset() {
*x = MediaTypes{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MediaTypes) String() string {
@@ -2154,7 +2103,7 @@ func (*MediaTypes) ProtoMessage() {}
func (x *MediaTypes) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2190,11 +2139,9 @@ type NamedAny struct {
func (x *NamedAny) Reset() {
*x = NamedAny{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedAny) String() string {
@@ -2205,7 +2152,7 @@ func (*NamedAny) ProtoMessage() {}
func (x *NamedAny) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2248,11 +2195,9 @@ type NamedCallbackOrReference struct {
func (x *NamedCallbackOrReference) Reset() {
*x = NamedCallbackOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedCallbackOrReference) String() string {
@@ -2263,7 +2208,7 @@ func (*NamedCallbackOrReference) ProtoMessage() {}
func (x *NamedCallbackOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2306,11 +2251,9 @@ type NamedEncoding struct {
func (x *NamedEncoding) Reset() {
*x = NamedEncoding{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedEncoding) String() string {
@@ -2321,7 +2264,7 @@ func (*NamedEncoding) ProtoMessage() {}
func (x *NamedEncoding) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2364,11 +2307,9 @@ type NamedExampleOrReference struct {
func (x *NamedExampleOrReference) Reset() {
*x = NamedExampleOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedExampleOrReference) String() string {
@@ -2379,7 +2320,7 @@ func (*NamedExampleOrReference) ProtoMessage() {}
func (x *NamedExampleOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2422,11 +2363,9 @@ type NamedHeaderOrReference struct {
func (x *NamedHeaderOrReference) Reset() {
*x = NamedHeaderOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedHeaderOrReference) String() string {
@@ -2437,7 +2376,7 @@ func (*NamedHeaderOrReference) ProtoMessage() {}
func (x *NamedHeaderOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2480,11 +2419,9 @@ type NamedLinkOrReference struct {
func (x *NamedLinkOrReference) Reset() {
*x = NamedLinkOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedLinkOrReference) String() string {
@@ -2495,7 +2432,7 @@ func (*NamedLinkOrReference) ProtoMessage() {}
func (x *NamedLinkOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2538,11 +2475,9 @@ type NamedMediaType struct {
func (x *NamedMediaType) Reset() {
*x = NamedMediaType{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedMediaType) String() string {
@@ -2553,7 +2488,7 @@ func (*NamedMediaType) ProtoMessage() {}
func (x *NamedMediaType) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2596,11 +2531,9 @@ type NamedParameterOrReference struct {
func (x *NamedParameterOrReference) Reset() {
*x = NamedParameterOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedParameterOrReference) String() string {
@@ -2611,7 +2544,7 @@ func (*NamedParameterOrReference) ProtoMessage() {}
func (x *NamedParameterOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2654,11 +2587,9 @@ type NamedPathItem struct {
func (x *NamedPathItem) Reset() {
*x = NamedPathItem{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedPathItem) String() string {
@@ -2669,7 +2600,7 @@ func (*NamedPathItem) ProtoMessage() {}
func (x *NamedPathItem) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2712,11 +2643,9 @@ type NamedRequestBodyOrReference struct {
func (x *NamedRequestBodyOrReference) Reset() {
*x = NamedRequestBodyOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedRequestBodyOrReference) String() string {
@@ -2727,7 +2656,7 @@ func (*NamedRequestBodyOrReference) ProtoMessage() {}
func (x *NamedRequestBodyOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2770,11 +2699,9 @@ type NamedResponseOrReference struct {
func (x *NamedResponseOrReference) Reset() {
*x = NamedResponseOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[39]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedResponseOrReference) String() string {
@@ -2785,7 +2712,7 @@ func (*NamedResponseOrReference) ProtoMessage() {}
func (x *NamedResponseOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[39]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2828,11 +2755,9 @@ type NamedSchemaOrReference struct {
func (x *NamedSchemaOrReference) Reset() {
*x = NamedSchemaOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedSchemaOrReference) String() string {
@@ -2843,7 +2768,7 @@ func (*NamedSchemaOrReference) ProtoMessage() {}
func (x *NamedSchemaOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2886,11 +2811,9 @@ type NamedSecuritySchemeOrReference struct {
func (x *NamedSecuritySchemeOrReference) Reset() {
*x = NamedSecuritySchemeOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedSecuritySchemeOrReference) String() string {
@@ -2901,7 +2824,7 @@ func (*NamedSecuritySchemeOrReference) ProtoMessage() {}
func (x *NamedSecuritySchemeOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2944,11 +2867,9 @@ type NamedServerVariable struct {
func (x *NamedServerVariable) Reset() {
*x = NamedServerVariable{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedServerVariable) String() string {
@@ -2959,7 +2880,7 @@ func (*NamedServerVariable) ProtoMessage() {}
func (x *NamedServerVariable) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3002,11 +2923,9 @@ type NamedString struct {
func (x *NamedString) Reset() {
*x = NamedString{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedString) String() string {
@@ -3017,7 +2936,7 @@ func (*NamedString) ProtoMessage() {}
func (x *NamedString) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3060,11 +2979,9 @@ type NamedStringArray struct {
func (x *NamedStringArray) Reset() {
*x = NamedStringArray{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NamedStringArray) String() string {
@@ -3075,7 +2992,7 @@ func (*NamedStringArray) ProtoMessage() {}
func (x *NamedStringArray) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3119,11 +3036,9 @@ type OauthFlow struct {
func (x *OauthFlow) Reset() {
*x = OauthFlow{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *OauthFlow) String() string {
@@ -3134,7 +3049,7 @@ func (*OauthFlow) ProtoMessage() {}
func (x *OauthFlow) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3199,11 +3114,9 @@ type OauthFlows struct {
func (x *OauthFlows) Reset() {
*x = OauthFlows{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *OauthFlows) String() string {
@@ -3214,7 +3127,7 @@ func (*OauthFlows) ProtoMessage() {}
func (x *OauthFlows) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3274,11 +3187,9 @@ type Object struct {
func (x *Object) Reset() {
*x = Object{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Object) String() string {
@@ -3289,7 +3200,7 @@ func (*Object) ProtoMessage() {}
func (x *Object) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3334,11 +3245,9 @@ type Operation struct {
func (x *Operation) Reset() {
*x = Operation{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Operation) String() string {
@@ -3349,7 +3258,7 @@ func (*Operation) ProtoMessage() {}
func (x *Operation) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3479,11 +3388,9 @@ type Parameter struct {
func (x *Parameter) Reset() {
*x = Parameter{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Parameter) String() string {
@@ -3494,7 +3401,7 @@ func (*Parameter) ProtoMessage() {}
func (x *Parameter) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3613,6 +3520,7 @@ type ParameterOrReference struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *ParameterOrReference_Parameter
// *ParameterOrReference_Reference
Oneof isParameterOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -3620,11 +3528,9 @@ type ParameterOrReference struct {
func (x *ParameterOrReference) Reset() {
*x = ParameterOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ParameterOrReference) String() string {
@@ -3635,7 +3541,7 @@ func (*ParameterOrReference) ProtoMessage() {}
func (x *ParameterOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3697,11 +3603,9 @@ type ParametersOrReferences struct {
func (x *ParametersOrReferences) Reset() {
*x = ParametersOrReferences{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ParametersOrReferences) String() string {
@@ -3712,7 +3616,7 @@ func (*ParametersOrReferences) ProtoMessage() {}
func (x *ParametersOrReferences) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3758,11 +3662,9 @@ type PathItem struct {
func (x *PathItem) Reset() {
*x = PathItem{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *PathItem) String() string {
@@ -3773,7 +3675,7 @@ func (*PathItem) ProtoMessage() {}
func (x *PathItem) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3898,11 +3800,9 @@ type Paths struct {
func (x *Paths) Reset() {
*x = Paths{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Paths) String() string {
@@ -3913,7 +3813,7 @@ func (*Paths) ProtoMessage() {}
func (x *Paths) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3952,11 +3852,9 @@ type Properties struct {
func (x *Properties) Reset() {
*x = Properties{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Properties) String() string {
@@ -3967,7 +3865,7 @@ func (*Properties) ProtoMessage() {}
func (x *Properties) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4002,11 +3900,9 @@ type Reference struct {
func (x *Reference) Reset() {
*x = Reference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Reference) String() string {
@@ -4017,7 +3913,7 @@ func (*Reference) ProtoMessage() {}
func (x *Reference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4063,11 +3959,9 @@ type RequestBodiesOrReferences struct {
func (x *RequestBodiesOrReferences) Reset() {
*x = RequestBodiesOrReferences{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RequestBodiesOrReferences) String() string {
@@ -4078,7 +3972,7 @@ func (*RequestBodiesOrReferences) ProtoMessage() {}
func (x *RequestBodiesOrReferences) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4114,11 +4008,9 @@ type RequestBody struct {
func (x *RequestBody) Reset() {
*x = RequestBody{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RequestBody) String() string {
@@ -4129,7 +4021,7 @@ func (*RequestBody) ProtoMessage() {}
func (x *RequestBody) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4178,6 +4070,7 @@ type RequestBodyOrReference struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *RequestBodyOrReference_RequestBody
// *RequestBodyOrReference_Reference
Oneof isRequestBodyOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -4185,11 +4078,9 @@ type RequestBodyOrReference struct {
func (x *RequestBodyOrReference) Reset() {
*x = RequestBodyOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RequestBodyOrReference) String() string {
@@ -4200,7 +4091,7 @@ func (*RequestBodyOrReference) ProtoMessage() {}
func (x *RequestBodyOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4267,11 +4158,9 @@ type Response struct {
func (x *Response) Reset() {
*x = Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Response) String() string {
@@ -4282,7 +4171,7 @@ func (*Response) ProtoMessage() {}
func (x *Response) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4338,6 +4227,7 @@ type ResponseOrReference struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *ResponseOrReference_Response
// *ResponseOrReference_Reference
Oneof isResponseOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -4345,11 +4235,9 @@ type ResponseOrReference struct {
func (x *ResponseOrReference) Reset() {
*x = ResponseOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ResponseOrReference) String() string {
@@ -4360,7 +4248,7 @@ func (*ResponseOrReference) ProtoMessage() {}
func (x *ResponseOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4425,11 +4313,9 @@ type Responses struct {
func (x *Responses) Reset() {
*x = Responses{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Responses) String() string {
@@ -4440,7 +4326,7 @@ func (*Responses) ProtoMessage() {}
func (x *Responses) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4486,11 +4372,9 @@ type ResponsesOrReferences struct {
func (x *ResponsesOrReferences) Reset() {
*x = ResponsesOrReferences{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ResponsesOrReferences) String() string {
@@ -4501,7 +4385,7 @@ func (*ResponsesOrReferences) ProtoMessage() {}
func (x *ResponsesOrReferences) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4569,11 +4453,9 @@ type Schema struct {
func (x *Schema) Reset() {
*x = Schema{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Schema) String() string {
@@ -4584,7 +4466,7 @@ func (*Schema) ProtoMessage() {}
func (x *Schema) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4857,6 +4739,7 @@ type SchemaOrReference struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *SchemaOrReference_Schema
// *SchemaOrReference_Reference
Oneof isSchemaOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -4864,11 +4747,9 @@ type SchemaOrReference struct {
func (x *SchemaOrReference) Reset() {
*x = SchemaOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SchemaOrReference) String() string {
@@ -4879,7 +4760,7 @@ func (*SchemaOrReference) ProtoMessage() {}
func (x *SchemaOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4941,11 +4822,9 @@ type SchemasOrReferences struct {
func (x *SchemasOrReferences) Reset() {
*x = SchemasOrReferences{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SchemasOrReferences) String() string {
@@ -4956,7 +4835,7 @@ func (*SchemasOrReferences) ProtoMessage() {}
func (x *SchemasOrReferences) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4989,11 +4868,9 @@ type SecurityRequirement struct {
func (x *SecurityRequirement) Reset() {
*x = SecurityRequirement{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SecurityRequirement) String() string {
@@ -5004,7 +4881,7 @@ func (*SecurityRequirement) ProtoMessage() {}
func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5045,11 +4922,9 @@ type SecurityScheme struct {
func (x *SecurityScheme) Reset() {
*x = SecurityScheme{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SecurityScheme) String() string {
@@ -5060,7 +4935,7 @@ func (*SecurityScheme) ProtoMessage() {}
func (x *SecurityScheme) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5144,6 +5019,7 @@ type SecuritySchemeOrReference struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *SecuritySchemeOrReference_SecurityScheme
// *SecuritySchemeOrReference_Reference
Oneof isSecuritySchemeOrReference_Oneof `protobuf_oneof:"oneof"`
@@ -5151,11 +5027,9 @@ type SecuritySchemeOrReference struct {
func (x *SecuritySchemeOrReference) Reset() {
*x = SecuritySchemeOrReference{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SecuritySchemeOrReference) String() string {
@@ -5166,7 +5040,7 @@ func (*SecuritySchemeOrReference) ProtoMessage() {}
func (x *SecuritySchemeOrReference) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5228,11 +5102,9 @@ type SecuritySchemesOrReferences struct {
func (x *SecuritySchemesOrReferences) Reset() {
*x = SecuritySchemesOrReferences{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SecuritySchemesOrReferences) String() string {
@@ -5243,7 +5115,7 @@ func (*SecuritySchemesOrReferences) ProtoMessage() {}
func (x *SecuritySchemesOrReferences) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5279,11 +5151,9 @@ type Server struct {
func (x *Server) Reset() {
*x = Server{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Server) String() string {
@@ -5294,7 +5164,7 @@ func (*Server) ProtoMessage() {}
func (x *Server) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5351,11 +5221,9 @@ type ServerVariable struct {
func (x *ServerVariable) Reset() {
*x = ServerVariable{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServerVariable) String() string {
@@ -5366,7 +5234,7 @@ func (*ServerVariable) ProtoMessage() {}
func (x *ServerVariable) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5419,11 +5287,9 @@ type ServerVariables struct {
func (x *ServerVariables) Reset() {
*x = ServerVariables{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServerVariables) String() string {
@@ -5434,7 +5300,7 @@ func (*ServerVariables) ProtoMessage() {}
func (x *ServerVariables) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5463,6 +5329,7 @@ type SpecificationExtension struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Oneof:
+ //
// *SpecificationExtension_Number
// *SpecificationExtension_Boolean
// *SpecificationExtension_String_
@@ -5471,11 +5338,9 @@ type SpecificationExtension struct {
func (x *SpecificationExtension) Reset() {
*x = SpecificationExtension{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SpecificationExtension) String() string {
@@ -5486,7 +5351,7 @@ func (*SpecificationExtension) ProtoMessage() {}
func (x *SpecificationExtension) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5561,11 +5426,9 @@ type StringArray struct {
func (x *StringArray) Reset() {
*x = StringArray{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *StringArray) String() string {
@@ -5576,7 +5439,7 @@ func (*StringArray) ProtoMessage() {}
func (x *StringArray) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5608,11 +5471,9 @@ type Strings struct {
func (x *Strings) Reset() {
*x = Strings{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Strings) String() string {
@@ -5623,7 +5484,7 @@ func (*Strings) ProtoMessage() {}
func (x *Strings) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5659,11 +5520,9 @@ type Tag struct {
func (x *Tag) Reset() {
*x = Tag{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Tag) String() string {
@@ -5674,7 +5533,7 @@ func (*Tag) ProtoMessage() {}
func (x *Tag) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5733,11 +5592,9 @@ type Xml struct {
func (x *Xml) Reset() {
*x = Xml{}
- if protoimpl.UnsafeEnabled {
- mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Xml) String() string {
@@ -5748,7 +5605,7 @@ func (*Xml) ProtoMessage() {}
func (x *Xml) ProtoReflect() protoreflect.Message {
mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6781,7 +6638,7 @@ func file_openapiv3_OpenAPIv3_proto_rawDescGZIP() []byte {
}
var file_openapiv3_OpenAPIv3_proto_msgTypes = make([]protoimpl.MessageInfo, 78)
-var file_openapiv3_OpenAPIv3_proto_goTypes = []interface{}{
+var file_openapiv3_OpenAPIv3_proto_goTypes = []any{
(*AdditionalPropertiesItem)(nil), // 0: openapi.v3.AdditionalPropertiesItem
(*Any)(nil), // 1: openapi.v3.Any
(*AnyOrExpression)(nil), // 2: openapi.v3.AnyOrExpression
@@ -7040,994 +6897,56 @@ func file_openapiv3_OpenAPIv3_proto_init() {
if File_openapiv3_OpenAPIv3_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_openapiv3_OpenAPIv3_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AdditionalPropertiesItem); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Any); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AnyOrExpression); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Callback); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CallbackOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CallbacksOrReferences); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Components); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Contact); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DefaultType); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Discriminator); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Document); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Encoding); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Encodings); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Example); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ExampleOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ExamplesOrReferences); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Expression); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ExternalDocs); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Header); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HeaderOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HeadersOrReferences); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Info); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ItemsItem); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*License); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Link); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LinkOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LinksOrReferences); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MediaType); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MediaTypes); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedAny); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedCallbackOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedEncoding); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedExampleOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedHeaderOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedLinkOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedMediaType); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedParameterOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedPathItem); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedRequestBodyOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedResponseOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedSchemaOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedSecuritySchemeOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedServerVariable); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedString); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NamedStringArray); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*OauthFlow); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*OauthFlows); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Object); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Operation); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Parameter); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ParameterOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ParametersOrReferences); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PathItem); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Paths); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Properties); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Reference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RequestBodiesOrReferences); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RequestBody); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RequestBodyOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ResponseOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Responses); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ResponsesOrReferences); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Schema); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SchemaOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SchemasOrReferences); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SecurityRequirement); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SecurityScheme); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SecuritySchemeOrReference); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SecuritySchemesOrReferences); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Server); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServerVariable); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServerVariables); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SpecificationExtension); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StringArray); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Strings); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Tag); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Xml); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_openapiv3_OpenAPIv3_proto_msgTypes[0].OneofWrappers = []interface{}{
+ file_openapiv3_OpenAPIv3_proto_msgTypes[0].OneofWrappers = []any{
(*AdditionalPropertiesItem_SchemaOrReference)(nil),
(*AdditionalPropertiesItem_Boolean)(nil),
}
- file_openapiv3_OpenAPIv3_proto_msgTypes[2].OneofWrappers = []interface{}{
+ file_openapiv3_OpenAPIv3_proto_msgTypes[2].OneofWrappers = []any{
(*AnyOrExpression_Any)(nil),
(*AnyOrExpression_Expression)(nil),
}
- file_openapiv3_OpenAPIv3_proto_msgTypes[4].OneofWrappers = []interface{}{
+ file_openapiv3_OpenAPIv3_proto_msgTypes[4].OneofWrappers = []any{
(*CallbackOrReference_Callback)(nil),
(*CallbackOrReference_Reference)(nil),
}
- file_openapiv3_OpenAPIv3_proto_msgTypes[8].OneofWrappers = []interface{}{
+ file_openapiv3_OpenAPIv3_proto_msgTypes[8].OneofWrappers = []any{
(*DefaultType_Number)(nil),
(*DefaultType_Boolean)(nil),
(*DefaultType_String_)(nil),
}
- file_openapiv3_OpenAPIv3_proto_msgTypes[14].OneofWrappers = []interface{}{
+ file_openapiv3_OpenAPIv3_proto_msgTypes[14].OneofWrappers = []any{
(*ExampleOrReference_Example)(nil),
(*ExampleOrReference_Reference)(nil),
}
- file_openapiv3_OpenAPIv3_proto_msgTypes[19].OneofWrappers = []interface{}{
+ file_openapiv3_OpenAPIv3_proto_msgTypes[19].OneofWrappers = []any{
(*HeaderOrReference_Header)(nil),
(*HeaderOrReference_Reference)(nil),
}
- file_openapiv3_OpenAPIv3_proto_msgTypes[25].OneofWrappers = []interface{}{
+ file_openapiv3_OpenAPIv3_proto_msgTypes[25].OneofWrappers = []any{
(*LinkOrReference_Link)(nil),
(*LinkOrReference_Reference)(nil),
}
- file_openapiv3_OpenAPIv3_proto_msgTypes[50].OneofWrappers = []interface{}{
+ file_openapiv3_OpenAPIv3_proto_msgTypes[50].OneofWrappers = []any{
(*ParameterOrReference_Parameter)(nil),
(*ParameterOrReference_Reference)(nil),
}
- file_openapiv3_OpenAPIv3_proto_msgTypes[58].OneofWrappers = []interface{}{
+ file_openapiv3_OpenAPIv3_proto_msgTypes[58].OneofWrappers = []any{
(*RequestBodyOrReference_RequestBody)(nil),
(*RequestBodyOrReference_Reference)(nil),
}
- file_openapiv3_OpenAPIv3_proto_msgTypes[60].OneofWrappers = []interface{}{
+ file_openapiv3_OpenAPIv3_proto_msgTypes[60].OneofWrappers = []any{
(*ResponseOrReference_Response)(nil),
(*ResponseOrReference_Reference)(nil),
}
- file_openapiv3_OpenAPIv3_proto_msgTypes[64].OneofWrappers = []interface{}{
+ file_openapiv3_OpenAPIv3_proto_msgTypes[64].OneofWrappers = []any{
(*SchemaOrReference_Schema)(nil),
(*SchemaOrReference_Reference)(nil),
}
- file_openapiv3_OpenAPIv3_proto_msgTypes[68].OneofWrappers = []interface{}{
+ file_openapiv3_OpenAPIv3_proto_msgTypes[68].OneofWrappers = []any{
(*SecuritySchemeOrReference_SecurityScheme)(nil),
(*SecuritySchemeOrReference_Reference)(nil),
}
- file_openapiv3_OpenAPIv3_proto_msgTypes[73].OneofWrappers = []interface{}{
+ file_openapiv3_OpenAPIv3_proto_msgTypes[73].OneofWrappers = []any{
(*SpecificationExtension_Number)(nil),
(*SpecificationExtension_Boolean)(nil),
(*SpecificationExtension_String_)(nil),
diff --git a/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go
new file mode 100644
index 0000000000..f9f1bd2654
--- /dev/null
+++ b/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go
@@ -0,0 +1,182 @@
+// Copyright 2022 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.1
+// protoc v4.23.4
+// source: openapiv3/annotations.proto
+
+package openapi_v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*Document)(nil),
+ Field: 1143,
+ Name: "openapi.v3.document",
+ Tag: "bytes,1143,opt,name=document",
+ Filename: "openapiv3/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MethodOptions)(nil),
+ ExtensionType: (*Operation)(nil),
+ Field: 1143,
+ Name: "openapi.v3.operation",
+ Tag: "bytes,1143,opt,name=operation",
+ Filename: "openapiv3/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*Schema)(nil),
+ Field: 1143,
+ Name: "openapi.v3.schema",
+ Tag: "bytes,1143,opt,name=schema",
+ Filename: "openapiv3/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*Schema)(nil),
+ Field: 1143,
+ Name: "openapi.v3.property",
+ Tag: "bytes,1143,opt,name=property",
+ Filename: "openapiv3/annotations.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // optional openapi.v3.Document document = 1143;
+ E_Document = &file_openapiv3_annotations_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+ // optional openapi.v3.Operation operation = 1143;
+ E_Operation = &file_openapiv3_annotations_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // optional openapi.v3.Schema schema = 1143;
+ E_Schema = &file_openapiv3_annotations_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional openapi.v3.Schema property = 1143;
+ E_Property = &file_openapiv3_annotations_proto_extTypes[3]
+)
+
+var File_openapiv3_annotations_proto protoreflect.FileDescriptor
+
+var file_openapiv3_annotations_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64,
+ 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a,
+ 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70,
+ 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x42, 0x0a, 0x0e, 0x6f,
+ 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_openapiv3_annotations_proto_goTypes = []any{
+ (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions
+ (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions
+ (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
+ (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions
+ (*Document)(nil), // 4: openapi.v3.Document
+ (*Operation)(nil), // 5: openapi.v3.Operation
+ (*Schema)(nil), // 6: openapi.v3.Schema
+}
+var file_openapiv3_annotations_proto_depIdxs = []int32{
+ 0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions
+ 1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions
+ 2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions
+ 3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions
+ 4, // 4: openapi.v3.document:type_name -> openapi.v3.Document
+ 5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation
+ 6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema
+ 6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema
+ 8, // [8:8] is the sub-list for method output_type
+ 8, // [8:8] is the sub-list for method input_type
+ 4, // [4:8] is the sub-list for extension type_name
+ 0, // [0:4] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_openapiv3_annotations_proto_init() }
+func file_openapiv3_annotations_proto_init() {
+ if File_openapiv3_annotations_proto != nil {
+ return
+ }
+ file_openapiv3_OpenAPIv3_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_openapiv3_annotations_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 4,
+ NumServices: 0,
+ },
+ GoTypes: file_openapiv3_annotations_proto_goTypes,
+ DependencyIndexes: file_openapiv3_annotations_proto_depIdxs,
+ ExtensionInfos: file_openapiv3_annotations_proto_extTypes,
+ }.Build()
+ File_openapiv3_annotations_proto = out.File
+ file_openapiv3_annotations_proto_rawDesc = nil
+ file_openapiv3_annotations_proto_goTypes = nil
+ file_openapiv3_annotations_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto
new file mode 100644
index 0000000000..09ee0aac51
--- /dev/null
+++ b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto
@@ -0,0 +1,56 @@
+// Copyright 2022 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package openapi.v3;
+
+import "google/protobuf/descriptor.proto";
+import "openapiv3/OpenAPIv3.proto";
+
+// The Go package name.
+option go_package = "./openapiv3;openapi_v3";
+// This option lets the proto compiler generate Java code inside the package
+// name (see below) instead of inside an outer class. It creates a simpler
+// developer experience by reducing one-level of name nesting and be
+// consistent with most programming languages that don't support outer classes.
+option java_multiple_files = true;
+// The Java outer classname should be the filename in UpperCamelCase. This
+// class is only used to hold proto descriptor, so developers don't need to
+// work with it directly.
+option java_outer_classname = "AnnotationsProto";
+// The Java package name must be proto package name with proper prefix.
+option java_package = "org.openapi_v3";
+// A reasonable prefix for the Objective-C symbols generated from the package.
+// It should at a minimum be 3 characters long, all uppercase, and convention
+// is to use an abbreviation of the package name. Something short, but
+// hopefully unique enough to not conflict with things that may come along in
+// the future. 'GPB' is reserved for the protocol buffer implementation itself.
+option objc_class_prefix = "OAS";
+
+extend google.protobuf.FileOptions {
+ Document document = 1143;
+}
+
+extend google.protobuf.MethodOptions {
+ Operation operation = 1143;
+}
+
+extend google.protobuf.MessageOptions {
+ Schema schema = 1143;
+}
+
+extend google.protobuf.FieldOptions {
+ Schema property = 1143;
+}
diff --git a/vendor/github.com/google/gnostic-models/openapiv3/document.go b/vendor/github.com/google/gnostic-models/openapiv3/document.go
index 1cee467735..499ff883c5 100644
--- a/vendor/github.com/google/gnostic-models/openapiv3/document.go
+++ b/vendor/github.com/google/gnostic-models/openapiv3/document.go
@@ -15,7 +15,7 @@
package openapi_v3
import (
- "gopkg.in/yaml.v3"
+ yaml "go.yaml.in/yaml/v3"
"github.com/google/gnostic-models/compiler"
)
diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml
deleted file mode 100644
index 061d72ae07..0000000000
--- a/vendor/github.com/google/gofuzz/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: go
-
-go:
- - 1.11.x
- - 1.12.x
- - 1.13.x
- - master
-
-script:
- - go test -cover
diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
deleted file mode 100644
index 97c1b34fd5..0000000000
--- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# How to contribute #
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-
-## Contributor License Agreement ##
-
-Contributions to any Google project must be accompanied by a Contributor
-License Agreement. This is not a copyright **assignment**, it simply gives
-Google permission to use and redistribute your contributions as part of the
-project.
-
- * If you are an individual writing original source code and you're sure you
- own the intellectual property, then you'll need to sign an [individual
- CLA][].
-
- * If you work for a company that wants to allow you to contribute your work,
- then you'll need to sign a [corporate CLA][].
-
-You generally only need to submit a CLA once, so if you've already submitted
-one (even if it was for a different project), you probably don't need to do it
-again.
-
-[individual CLA]: https://developers.google.com/open-source/cla/individual
-[corporate CLA]: https://developers.google.com/open-source/cla/corporate
-
-
-## Submitting a patch ##
-
- 1. It's generally best to start by opening a new issue describing the bug or
- feature you're intending to fix. Even if you think it's relatively minor,
- it's helpful to know what people are working on. Mention in the initial
- issue that you are planning to work on that bug or feature so that it can
- be assigned to you.
-
- 1. Follow the normal process of [forking][] the project, and setup a new
- branch to work in. It's important that each group of changes be done in
- separate branches in order to ensure that a pull request only includes the
- commits related to that bug or feature.
-
- 1. Go makes it very simple to ensure properly formatted code, so always run
- `go fmt` on your code before committing it. You should also run
- [golint][] over your code. As noted in the [golint readme][], it's not
- strictly necessary that your code be completely "lint-free", but this will
- help you find common style issues.
-
- 1. Any significant changes should almost always be accompanied by tests. The
- project already has good test coverage, so look at some of the existing
- tests if you're unsure how to go about it. [gocov][] and [gocov-html][]
- are invaluable tools for seeing which parts of your code aren't being
- exercised by your tests.
-
- 1. Do your best to have [well-formed commit messages][] for each change.
- This provides consistency throughout the project, and ensures that commit
- messages are able to be formatted properly by various git tools.
-
- 1. Finally, push the commits to your fork and submit a [pull request][].
-
-[forking]: https://help.github.com/articles/fork-a-repo
-[golint]: https://github.com/golang/lint
-[golint readme]: https://github.com/golang/lint/blob/master/README
-[gocov]: https://github.com/axw/gocov
-[gocov-html]: https://github.com/matm/gocov-html
-[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
-[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits
-[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go
deleted file mode 100644
index 761520a8ce..0000000000
--- a/vendor/github.com/google/gofuzz/fuzz.go
+++ /dev/null
@@ -1,605 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package fuzz
-
-import (
- "fmt"
- "math/rand"
- "reflect"
- "regexp"
- "time"
-
- "github.com/google/gofuzz/bytesource"
- "strings"
-)
-
-// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
-type fuzzFuncMap map[reflect.Type]reflect.Value
-
-// Fuzzer knows how to fill any object with random fields.
-type Fuzzer struct {
- fuzzFuncs fuzzFuncMap
- defaultFuzzFuncs fuzzFuncMap
- r *rand.Rand
- nilChance float64
- minElements int
- maxElements int
- maxDepth int
- skipFieldPatterns []*regexp.Regexp
-}
-
-// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
-// RandSource, NilChance, or NumElements in any order.
-func New() *Fuzzer {
- return NewWithSeed(time.Now().UnixNano())
-}
-
-func NewWithSeed(seed int64) *Fuzzer {
- f := &Fuzzer{
- defaultFuzzFuncs: fuzzFuncMap{
- reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
- },
-
- fuzzFuncs: fuzzFuncMap{},
- r: rand.New(rand.NewSource(seed)),
- nilChance: .2,
- minElements: 1,
- maxElements: 10,
- maxDepth: 100,
- }
- return f
-}
-
-// NewFromGoFuzz is a helper function that enables using gofuzz (this
-// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous
-// fuzzing. Essentially, it enables translating the fuzzing bytes from
-// go-fuzz to any Go object using this library.
-//
-// This implementation promises a constant translation from a given slice of
-// bytes to the fuzzed objects. This promise will remain over future
-// versions of Go and of this library.
-//
-// Note: the returned Fuzzer should not be shared between multiple goroutines,
-// as its deterministic output will no longer be available.
-//
-// Example: use go-fuzz to test the function `MyFunc(int)` in the package
-// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content:
-//
-// // +build gofuzz
-// package mypacakge
-// import fuzz "github.com/google/gofuzz"
-// func Fuzz(data []byte) int {
-// var i int
-// fuzz.NewFromGoFuzz(data).Fuzz(&i)
-// MyFunc(i)
-// return 0
-// }
-func NewFromGoFuzz(data []byte) *Fuzzer {
- return New().RandSource(bytesource.New(data))
-}
-
-// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
-//
-// Each entry in fuzzFuncs must be a function taking two parameters.
-// The first parameter must be a pointer or map. It is the variable that
-// function will fill with random data. The second parameter must be a
-// fuzz.Continue, which will provide a source of randomness and a way
-// to automatically continue fuzzing smaller pieces of the first parameter.
-//
-// These functions are called sensibly, e.g., if you wanted custom string
-// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
-// called and passed the address of strings. Maps and pointers will always
-// be made/new'd for you, ignoring the NilChange option. For slices, it
-// doesn't make much sense to pre-create them--Fuzzer doesn't know how
-// long you want your slice--so take a pointer to a slice, and make it
-// yourself. (If you don't want your map/pointer type pre-made, take a
-// pointer to it, and make it yourself.) See the examples for a range of
-// custom functions.
-func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer {
- for i := range fuzzFuncs {
- v := reflect.ValueOf(fuzzFuncs[i])
- if v.Kind() != reflect.Func {
- panic("Need only funcs!")
- }
- t := v.Type()
- if t.NumIn() != 2 || t.NumOut() != 0 {
- panic("Need 2 in and 0 out params!")
- }
- argT := t.In(0)
- switch argT.Kind() {
- case reflect.Ptr, reflect.Map:
- default:
- panic("fuzzFunc must take pointer or map type")
- }
- if t.In(1) != reflect.TypeOf(Continue{}) {
- panic("fuzzFunc's second parameter must be type fuzz.Continue")
- }
- f.fuzzFuncs[argT] = v
- }
- return f
-}
-
-// RandSource causes f to get values from the given source of randomness.
-// Use if you want deterministic fuzzing.
-func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer {
- f.r = rand.New(s)
- return f
-}
-
-// NilChance sets the probability of creating a nil pointer, map, or slice to
-// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
-func (f *Fuzzer) NilChance(p float64) *Fuzzer {
- if p < 0 || p > 1 {
- panic("p should be between 0 and 1, inclusive.")
- }
- f.nilChance = p
- return f
-}
-
-// NumElements sets the minimum and maximum number of elements that will be
-// added to a non-nil map or slice.
-func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer {
- if atLeast > atMost {
- panic("atLeast must be <= atMost")
- }
- if atLeast < 0 {
- panic("atLeast must be >= 0")
- }
- f.minElements = atLeast
- f.maxElements = atMost
- return f
-}
-
-func (f *Fuzzer) genElementCount() int {
- if f.minElements == f.maxElements {
- return f.minElements
- }
- return f.minElements + f.r.Intn(f.maxElements-f.minElements+1)
-}
-
-func (f *Fuzzer) genShouldFill() bool {
- return f.r.Float64() >= f.nilChance
-}
-
-// MaxDepth sets the maximum number of recursive fuzz calls that will be made
-// before stopping. This includes struct members, pointers, and map and slice
-// elements.
-func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
- f.maxDepth = d
- return f
-}
-
-// Skip fields which match the supplied pattern. Call this multiple times if needed
-// This is useful to skip XXX_ fields generated by protobuf
-func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer {
- f.skipFieldPatterns = append(f.skipFieldPatterns, pattern)
- return f
-}
-
-// Fuzz recursively fills all of obj's fields with something random. First
-// this tries to find a custom fuzz function (see Funcs). If there is no
-// custom function this tests whether the object implements fuzz.Interface and,
-// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if
-// there is a default fuzz function provided by this package. If all of that
-// fails, this will generate random values for all primitive fields and then
-// recurse for all non-primitives.
-//
-// This is safe for cyclic or tree-like structs, up to a limit. Use the
-// MaxDepth method to adjust how deep you need it to recurse.
-//
-// obj must be a pointer. Only exported (public) fields can be set (thanks,
-// golang :/ ) Intended for tests, so will panic on bad input or unimplemented
-// fields.
-func (f *Fuzzer) Fuzz(obj interface{}) {
- v := reflect.ValueOf(obj)
- if v.Kind() != reflect.Ptr {
- panic("needed ptr!")
- }
- v = v.Elem()
- f.fuzzWithContext(v, 0)
-}
-
-// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
-// obj's type will not be called and obj will not be tested for fuzz.Interface
-// conformance. This applies only to obj and not other instances of obj's
-// type.
-// Not safe for cyclic or tree-like structs!
-// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
-// Intended for tests, so will panic on bad input or unimplemented fields.
-func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
- v := reflect.ValueOf(obj)
- if v.Kind() != reflect.Ptr {
- panic("needed ptr!")
- }
- v = v.Elem()
- f.fuzzWithContext(v, flagNoCustomFuzz)
-}
-
-const (
- // Do not try to find a custom fuzz function. Does not apply recursively.
- flagNoCustomFuzz uint64 = 1 << iota
-)
-
-func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) {
- fc := &fuzzerContext{fuzzer: f}
- fc.doFuzz(v, flags)
-}
-
-// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer
-// be thread-safe.
-type fuzzerContext struct {
- fuzzer *Fuzzer
- curDepth int
-}
-
-func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
- if fc.curDepth >= fc.fuzzer.maxDepth {
- return
- }
- fc.curDepth++
- defer func() { fc.curDepth-- }()
-
- if !v.CanSet() {
- return
- }
-
- if flags&flagNoCustomFuzz == 0 {
- // Check for both pointer and non-pointer custom functions.
- if v.CanAddr() && fc.tryCustom(v.Addr()) {
- return
- }
- if fc.tryCustom(v) {
- return
- }
- }
-
- if fn, ok := fillFuncMap[v.Kind()]; ok {
- fn(v, fc.fuzzer.r)
- return
- }
-
- switch v.Kind() {
- case reflect.Map:
- if fc.fuzzer.genShouldFill() {
- v.Set(reflect.MakeMap(v.Type()))
- n := fc.fuzzer.genElementCount()
- for i := 0; i < n; i++ {
- key := reflect.New(v.Type().Key()).Elem()
- fc.doFuzz(key, 0)
- val := reflect.New(v.Type().Elem()).Elem()
- fc.doFuzz(val, 0)
- v.SetMapIndex(key, val)
- }
- return
- }
- v.Set(reflect.Zero(v.Type()))
- case reflect.Ptr:
- if fc.fuzzer.genShouldFill() {
- v.Set(reflect.New(v.Type().Elem()))
- fc.doFuzz(v.Elem(), 0)
- return
- }
- v.Set(reflect.Zero(v.Type()))
- case reflect.Slice:
- if fc.fuzzer.genShouldFill() {
- n := fc.fuzzer.genElementCount()
- v.Set(reflect.MakeSlice(v.Type(), n, n))
- for i := 0; i < n; i++ {
- fc.doFuzz(v.Index(i), 0)
- }
- return
- }
- v.Set(reflect.Zero(v.Type()))
- case reflect.Array:
- if fc.fuzzer.genShouldFill() {
- n := v.Len()
- for i := 0; i < n; i++ {
- fc.doFuzz(v.Index(i), 0)
- }
- return
- }
- v.Set(reflect.Zero(v.Type()))
- case reflect.Struct:
- for i := 0; i < v.NumField(); i++ {
- skipField := false
- fieldName := v.Type().Field(i).Name
- for _, pattern := range fc.fuzzer.skipFieldPatterns {
- if pattern.MatchString(fieldName) {
- skipField = true
- break
- }
- }
- if !skipField {
- fc.doFuzz(v.Field(i), 0)
- }
- }
- case reflect.Chan:
- fallthrough
- case reflect.Func:
- fallthrough
- case reflect.Interface:
- fallthrough
- default:
- panic(fmt.Sprintf("Can't handle %#v", v.Interface()))
- }
-}
-
-// tryCustom searches for custom handlers, and returns true iff it finds a match
-// and successfully randomizes v.
-func (fc *fuzzerContext) tryCustom(v reflect.Value) bool {
- // First: see if we have a fuzz function for it.
- doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()]
- if !ok {
- // Second: see if it can fuzz itself.
- if v.CanInterface() {
- intf := v.Interface()
- if fuzzable, ok := intf.(Interface); ok {
- fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r})
- return true
- }
- }
- // Finally: see if there is a default fuzz function.
- doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()]
- if !ok {
- return false
- }
- }
-
- switch v.Kind() {
- case reflect.Ptr:
- if v.IsNil() {
- if !v.CanSet() {
- return false
- }
- v.Set(reflect.New(v.Type().Elem()))
- }
- case reflect.Map:
- if v.IsNil() {
- if !v.CanSet() {
- return false
- }
- v.Set(reflect.MakeMap(v.Type()))
- }
- default:
- return false
- }
-
- doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
- fc: fc,
- Rand: fc.fuzzer.r,
- })})
- return true
-}
-
-// Interface represents an object that knows how to fuzz itself. Any time we
-// find a type that implements this interface we will delegate the act of
-// fuzzing itself.
-type Interface interface {
- Fuzz(c Continue)
-}
-
-// Continue can be passed to custom fuzzing functions to allow them to use
-// the correct source of randomness and to continue fuzzing their members.
-type Continue struct {
- fc *fuzzerContext
-
- // For convenience, Continue implements rand.Rand via embedding.
- // Use this for generating any randomness if you want your fuzzing
- // to be repeatable for a given seed.
- *rand.Rand
-}
-
-// Fuzz continues fuzzing obj. obj must be a pointer.
-func (c Continue) Fuzz(obj interface{}) {
- v := reflect.ValueOf(obj)
- if v.Kind() != reflect.Ptr {
- panic("needed ptr!")
- }
- v = v.Elem()
- c.fc.doFuzz(v, 0)
-}
-
-// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
-// obj's type will not be called and obj will not be tested for fuzz.Interface
-// conformance. This applies only to obj and not other instances of obj's
-// type.
-func (c Continue) FuzzNoCustom(obj interface{}) {
- v := reflect.ValueOf(obj)
- if v.Kind() != reflect.Ptr {
- panic("needed ptr!")
- }
- v = v.Elem()
- c.fc.doFuzz(v, flagNoCustomFuzz)
-}
-
-// RandString makes a random string up to 20 characters long. The returned string
-// may include a variety of (valid) UTF-8 encodings.
-func (c Continue) RandString() string {
- return randString(c.Rand)
-}
-
-// RandUint64 makes random 64 bit numbers.
-// Weirdly, rand doesn't have a function that gives you 64 random bits.
-func (c Continue) RandUint64() uint64 {
- return randUint64(c.Rand)
-}
-
-// RandBool returns true or false randomly.
-func (c Continue) RandBool() bool {
- return randBool(c.Rand)
-}
-
-func fuzzInt(v reflect.Value, r *rand.Rand) {
- v.SetInt(int64(randUint64(r)))
-}
-
-func fuzzUint(v reflect.Value, r *rand.Rand) {
- v.SetUint(randUint64(r))
-}
-
-func fuzzTime(t *time.Time, c Continue) {
- var sec, nsec int64
- // Allow for about 1000 years of random time values, which keeps things
- // like JSON parsing reasonably happy.
- sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
- c.Fuzz(&nsec)
- *t = time.Unix(sec, nsec)
-}
-
-var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
- reflect.Bool: func(v reflect.Value, r *rand.Rand) {
- v.SetBool(randBool(r))
- },
- reflect.Int: fuzzInt,
- reflect.Int8: fuzzInt,
- reflect.Int16: fuzzInt,
- reflect.Int32: fuzzInt,
- reflect.Int64: fuzzInt,
- reflect.Uint: fuzzUint,
- reflect.Uint8: fuzzUint,
- reflect.Uint16: fuzzUint,
- reflect.Uint32: fuzzUint,
- reflect.Uint64: fuzzUint,
- reflect.Uintptr: fuzzUint,
- reflect.Float32: func(v reflect.Value, r *rand.Rand) {
- v.SetFloat(float64(r.Float32()))
- },
- reflect.Float64: func(v reflect.Value, r *rand.Rand) {
- v.SetFloat(r.Float64())
- },
- reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
- v.SetComplex(complex128(complex(r.Float32(), r.Float32())))
- },
- reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
- v.SetComplex(complex(r.Float64(), r.Float64()))
- },
- reflect.String: func(v reflect.Value, r *rand.Rand) {
- v.SetString(randString(r))
- },
- reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
- panic("unimplemented")
- },
-}
-
-// randBool returns true or false randomly.
-func randBool(r *rand.Rand) bool {
- return r.Int31()&(1<<30) == 0
-}
-
-type int63nPicker interface {
- Int63n(int64) int64
-}
-
-// UnicodeRange describes a sequential range of unicode characters.
-// Last must be numerically greater than First.
-type UnicodeRange struct {
- First, Last rune
-}
-
-// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
-// To be useful, each range must have at least one character (First <= Last) and
-// there must be at least one range.
-type UnicodeRanges []UnicodeRange
-
-// choose returns a random unicode character from the given range, using the
-// given randomness source.
-func (ur UnicodeRange) choose(r int63nPicker) rune {
- count := int64(ur.Last - ur.First + 1)
- return ur.First + rune(r.Int63n(count))
-}
-
-// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
-// Each character is selected from the range ur. If there are no characters
-// in the range (cr.Last < cr.First), this will panic.
-func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) {
- ur.check()
- return func(s *string, c Continue) {
- *s = ur.randString(c.Rand)
- }
-}
-
-// check is a function that used to check whether the first of ur(UnicodeRange)
-// is greater than the last one.
-func (ur UnicodeRange) check() {
- if ur.Last < ur.First {
- panic("The last encoding must be greater than the first one.")
- }
-}
-
-// randString of UnicodeRange makes a random string up to 20 characters long.
-// Each character is selected form ur(UnicodeRange).
-func (ur UnicodeRange) randString(r *rand.Rand) string {
- n := r.Intn(20)
- sb := strings.Builder{}
- sb.Grow(n)
- for i := 0; i < n; i++ {
- sb.WriteRune(ur.choose(r))
- }
- return sb.String()
-}
-
-// defaultUnicodeRanges sets a default unicode range when user do not set
-// CustomStringFuzzFunc() but wants fuzz string.
-var defaultUnicodeRanges = UnicodeRanges{
- {' ', '~'}, // ASCII characters
- {'\u00a0', '\u02af'}, // Multi-byte encoded characters
- {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
-}
-
-// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
-// Each character is selected from one of the ranges of ur(UnicodeRanges).
-// Each range has an equal probability of being chosen. If there are no ranges,
-// or a selected range has no characters (.Last < .First), this will panic.
-// Do not modify any of the ranges in ur after calling this function.
-func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) {
- // Check unicode ranges slice is empty.
- if len(ur) == 0 {
- panic("UnicodeRanges is empty.")
- }
- // if not empty, each range should be checked.
- for i := range ur {
- ur[i].check()
- }
- return func(s *string, c Continue) {
- *s = ur.randString(c.Rand)
- }
-}
-
-// randString of UnicodeRanges makes a random string up to 20 characters long.
-// Each character is selected form one of the ranges of ur(UnicodeRanges),
-// and each range has an equal probability of being chosen.
-func (ur UnicodeRanges) randString(r *rand.Rand) string {
- n := r.Intn(20)
- sb := strings.Builder{}
- sb.Grow(n)
- for i := 0; i < n; i++ {
- sb.WriteRune(ur[r.Intn(len(ur))].choose(r))
- }
- return sb.String()
-}
-
-// randString makes a random string up to 20 characters long. The returned string
-// may include a variety of (valid) UTF-8 encodings.
-func randString(r *rand.Rand) string {
- return defaultUnicodeRanges.randString(r)
-}
-
-// randUint64 makes random 64 bit numbers.
-// Weirdly, rand doesn't have a function that gives you 64 random bits.
-func randUint64(r *rand.Rand) uint64 {
- return uint64(r.Uint32())<<32 | uint64(r.Uint32())
-}
diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go
index ab7f03ae26..8ce9d3cf3b 100644
--- a/vendor/github.com/google/pprof/profile/encode.go
+++ b/vendor/github.com/google/pprof/profile/encode.go
@@ -17,6 +17,7 @@ package profile
import (
"errors"
"sort"
+ "strings"
)
func (p *Profile) decoder() []decoder {
@@ -121,6 +122,7 @@ func (p *Profile) preEncode() {
}
p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)
+ p.docURLX = addString(strings, p.DocURL)
p.stringTable = make([]string, len(strings))
for s, i := range strings {
@@ -155,6 +157,7 @@ func (p *Profile) encode(b *buffer) {
encodeInt64Opt(b, 12, p.Period)
encodeInt64s(b, 13, p.commentX)
encodeInt64(b, 14, p.defaultSampleTypeX)
+ encodeInt64Opt(b, 15, p.docURLX)
}
var profileDecoder = []decoder{
@@ -183,12 +186,13 @@ var profileDecoder = []decoder{
// repeated Location location = 4
func(b *buffer, m message) error {
x := new(Location)
- x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer
+ x.Line = b.tmpLines[:0] // Use shared space temporarily
pp := m.(*Profile)
pp.Location = append(pp.Location, x)
err := decodeMessage(b, x)
- var tmp []Line
- x.Line = append(tmp, x.Line...) // Shrink to allocated size
+ b.tmpLines = x.Line[:0]
+ // Copy to shrink size and detach from shared space.
+ x.Line = append([]Line(nil), x.Line...)
return err
},
// repeated Function function = 5
@@ -235,6 +239,8 @@ var profileDecoder = []decoder{
func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
// int64 defaultSampleType = 14
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
+ // string doc_link = 15;
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) },
}
// postDecode takes the unexported fields populated by decode (with
@@ -252,6 +258,14 @@ func (p *Profile) postDecode() error {
} else {
mappings[m.ID] = m
}
+
+ // If this a main linux kernel mapping with a relocation symbol suffix
+ // ("[kernel.kallsyms]_text"), extract said suffix.
+ // It is fairly hacky to handle at this level, but the alternatives appear even worse.
+ const prefix = "[kernel.kallsyms]"
+ if strings.HasPrefix(m.File, prefix) {
+ m.KernelRelocationSymbol = m.File[len(prefix):]
+ }
}
functions := make(map[uint64]*Function, len(p.Function))
@@ -298,41 +312,52 @@ func (p *Profile) postDecode() error {
st.Unit, err = getString(p.stringTable, &st.unitX, err)
}
+ // Pre-allocate space for all locations.
+ numLocations := 0
+ for _, s := range p.Sample {
+ numLocations += len(s.locationIDX)
+ }
+ locBuffer := make([]*Location, numLocations)
+
for _, s := range p.Sample {
- labels := make(map[string][]string, len(s.labelX))
- numLabels := make(map[string][]int64, len(s.labelX))
- numUnits := make(map[string][]string, len(s.labelX))
- for _, l := range s.labelX {
- var key, value string
- key, err = getString(p.stringTable, &l.keyX, err)
- if l.strX != 0 {
- value, err = getString(p.stringTable, &l.strX, err)
- labels[key] = append(labels[key], value)
- } else if l.numX != 0 || l.unitX != 0 {
- numValues := numLabels[key]
- units := numUnits[key]
- if l.unitX != 0 {
- var unit string
- unit, err = getString(p.stringTable, &l.unitX, err)
- units = padStringArray(units, len(numValues))
- numUnits[key] = append(units, unit)
+ if len(s.labelX) > 0 {
+ labels := make(map[string][]string, len(s.labelX))
+ numLabels := make(map[string][]int64, len(s.labelX))
+ numUnits := make(map[string][]string, len(s.labelX))
+ for _, l := range s.labelX {
+ var key, value string
+ key, err = getString(p.stringTable, &l.keyX, err)
+ if l.strX != 0 {
+ value, err = getString(p.stringTable, &l.strX, err)
+ labels[key] = append(labels[key], value)
+ } else if l.numX != 0 || l.unitX != 0 {
+ numValues := numLabels[key]
+ units := numUnits[key]
+ if l.unitX != 0 {
+ var unit string
+ unit, err = getString(p.stringTable, &l.unitX, err)
+ units = padStringArray(units, len(numValues))
+ numUnits[key] = append(units, unit)
+ }
+ numLabels[key] = append(numLabels[key], l.numX)
}
- numLabels[key] = append(numLabels[key], l.numX)
}
- }
- if len(labels) > 0 {
- s.Label = labels
- }
- if len(numLabels) > 0 {
- s.NumLabel = numLabels
- for key, units := range numUnits {
- if len(units) > 0 {
- numUnits[key] = padStringArray(units, len(numLabels[key]))
+ if len(labels) > 0 {
+ s.Label = labels
+ }
+ if len(numLabels) > 0 {
+ s.NumLabel = numLabels
+ for key, units := range numUnits {
+ if len(units) > 0 {
+ numUnits[key] = padStringArray(units, len(numLabels[key]))
+ }
}
+ s.NumUnit = numUnits
}
- s.NumUnit = numUnits
}
- s.Location = make([]*Location, len(s.locationIDX))
+
+ s.Location = locBuffer[:len(s.locationIDX)]
+ locBuffer = locBuffer[len(s.locationIDX):]
for i, lid := range s.locationIDX {
if lid < uint64(len(locationIds)) {
s.Location[i] = locationIds[lid]
@@ -363,6 +388,7 @@ func (p *Profile) postDecode() error {
p.commentX = nil
p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
+ p.DocURL, err = getString(p.stringTable, &p.docURLX, err)
p.stringTable = nil
return err
}
@@ -509,6 +535,7 @@ func (p *Line) decoder() []decoder {
func (p *Line) encode(b *buffer) {
encodeUint64Opt(b, 1, p.functionIDX)
encodeInt64Opt(b, 2, p.Line)
+ encodeInt64Opt(b, 3, p.Column)
}
var lineDecoder = []decoder{
@@ -517,6 +544,8 @@ var lineDecoder = []decoder{
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
// optional int64 line = 2
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
+ // optional int64 column = 3
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) },
}
func (p *Function) decoder() []decoder {
diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go
index ea8e66c68d..c794b93906 100644
--- a/vendor/github.com/google/pprof/profile/filter.go
+++ b/vendor/github.com/google/pprof/profile/filter.go
@@ -22,6 +22,10 @@ import "regexp"
// samples where at least one frame matches focus but none match ignore.
// Returns true is the corresponding regexp matched at least one sample.
func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
+ if focus == nil && ignore == nil && hide == nil && show == nil {
+ fm = true // Missing focus implies a match
+ return
+ }
focusOrIgnore := make(map[uint64]bool)
hidden := make(map[uint64]bool)
for _, l := range p.Location {
diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go
index 91f45e53c6..4580bab183 100644
--- a/vendor/github.com/google/pprof/profile/legacy_java_profile.go
+++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go
@@ -56,7 +56,7 @@ func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte
}
// Strip out addresses for better merge.
- if err = p.Aggregate(true, true, true, true, false); err != nil {
+ if err = p.Aggregate(true, true, true, true, false, false); err != nil {
return nil, err
}
@@ -99,7 +99,7 @@ func parseJavaProfile(b []byte) (*Profile, error) {
}
// Strip out addresses for better merge.
- if err = p.Aggregate(true, true, true, true, false); err != nil {
+ if err = p.Aggregate(true, true, true, true, false, false); err != nil {
return nil, err
}
diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go
index 0c8f3bb5b7..8d07fd6c27 100644
--- a/vendor/github.com/google/pprof/profile/legacy_profile.go
+++ b/vendor/github.com/google/pprof/profile/legacy_profile.go
@@ -295,11 +295,12 @@ func get64b(b []byte) (uint64, []byte) {
//
// The general format for profilez samples is a sequence of words in
// binary format. The first words are a header with the following data:
-// 1st word -- 0
-// 2nd word -- 3
-// 3rd word -- 0 if a c++ application, 1 if a java application.
-// 4th word -- Sampling period (in microseconds).
-// 5th word -- Padding.
+//
+// 1st word -- 0
+// 2nd word -- 3
+// 3rd word -- 0 if a c++ application, 1 if a java application.
+// 4th word -- Sampling period (in microseconds).
+// 5th word -- Padding.
func parseCPU(b []byte) (*Profile, error) {
var parse func([]byte) (uint64, []byte)
var n1, n2, n3, n4, n5 uint64
@@ -403,15 +404,18 @@ func cleanupDuplicateLocations(p *Profile) {
//
// profilez samples are a repeated sequence of stack frames of the
// form:
-// 1st word -- The number of times this stack was encountered.
-// 2nd word -- The size of the stack (StackSize).
-// 3rd word -- The first address on the stack.
-// ...
-// StackSize + 2 -- The last address on the stack
+//
+// 1st word -- The number of times this stack was encountered.
+// 2nd word -- The size of the stack (StackSize).
+// 3rd word -- The first address on the stack.
+// ...
+// StackSize + 2 -- The last address on the stack
+//
// The last stack trace is of the form:
-// 1st word -- 0
-// 2nd word -- 1
-// 3rd word -- 0
+//
+// 1st word -- 0
+// 2nd word -- 1
+// 3rd word -- 0
//
// Addresses from stack traces may point to the next instruction after
// each call. Optionally adjust by -1 to land somewhere on the actual
@@ -861,7 +865,6 @@ func parseThread(b []byte) (*Profile, error) {
// Recognize each thread and populate profile samples.
for !isMemoryMapSentinel(line) {
if strings.HasPrefix(line, "---- no stack trace for") {
- line = ""
break
}
if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go
index 9978e7330e..ba4d746407 100644
--- a/vendor/github.com/google/pprof/profile/merge.go
+++ b/vendor/github.com/google/pprof/profile/merge.go
@@ -15,6 +15,7 @@
package profile
import (
+ "encoding/binary"
"fmt"
"sort"
"strconv"
@@ -58,7 +59,7 @@ func Merge(srcs []*Profile) (*Profile, error) {
for _, src := range srcs {
// Clear the profile-specific hash tables
- pm.locationsByID = make(map[uint64]*Location, len(src.Location))
+ pm.locationsByID = makeLocationIDMap(len(src.Location))
pm.functionsByID = make(map[uint64]*Function, len(src.Function))
pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
@@ -136,7 +137,7 @@ type profileMerger struct {
p *Profile
// Memoization tables within a profile.
- locationsByID map[uint64]*Location
+ locationsByID locationIDMap
functionsByID map[uint64]*Function
mappingsByID map[uint64]mapInfo
@@ -153,6 +154,16 @@ type mapInfo struct {
}
func (pm *profileMerger) mapSample(src *Sample) *Sample {
+ // Check memoization table
+ k := pm.sampleKey(src)
+ if ss, ok := pm.samples[k]; ok {
+ for i, v := range src.Value {
+ ss.Value[i] += v
+ }
+ return ss
+ }
+
+ // Make new sample.
s := &Sample{
Location: make([]*Location, len(src.Location)),
Value: make([]int64, len(src.Value)),
@@ -177,52 +188,98 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample {
s.NumLabel[k] = vv
s.NumUnit[k] = uu
}
- // Check memoization table. Must be done on the remapped location to
- // account for the remapped mapping. Add current values to the
- // existing sample.
- k := s.key()
- if ss, ok := pm.samples[k]; ok {
- for i, v := range src.Value {
- ss.Value[i] += v
- }
- return ss
- }
copy(s.Value, src.Value)
pm.samples[k] = s
pm.p.Sample = append(pm.p.Sample, s)
return s
}
-// key generates sampleKey to be used as a key for maps.
-func (sample *Sample) key() sampleKey {
- ids := make([]string, len(sample.Location))
- for i, l := range sample.Location {
- ids[i] = strconv.FormatUint(l.ID, 16)
+func (pm *profileMerger) sampleKey(sample *Sample) sampleKey {
+ // Accumulate contents into a string.
+ var buf strings.Builder
+ buf.Grow(64) // Heuristic to avoid extra allocs
+
+ // encode a number
+ putNumber := func(v uint64) {
+ var num [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(num[:], v)
+ buf.Write(num[:n])
+ }
+
+ // encode a string prefixed with its length.
+ putDelimitedString := func(s string) {
+ putNumber(uint64(len(s)))
+ buf.WriteString(s)
+ }
+
+ for _, l := range sample.Location {
+ // Get the location in the merged profile, which may have a different ID.
+ if loc := pm.mapLocation(l); loc != nil {
+ putNumber(loc.ID)
+ }
}
+ putNumber(0) // Delimiter
- labels := make([]string, 0, len(sample.Label))
- for k, v := range sample.Label {
- labels = append(labels, fmt.Sprintf("%q%q", k, v))
+ for _, l := range sortedKeys1(sample.Label) {
+ putDelimitedString(l)
+ values := sample.Label[l]
+ putNumber(uint64(len(values)))
+ for _, v := range values {
+ putDelimitedString(v)
+ }
}
- sort.Strings(labels)
- numlabels := make([]string, 0, len(sample.NumLabel))
- for k, v := range sample.NumLabel {
- numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
+ for _, l := range sortedKeys2(sample.NumLabel) {
+ putDelimitedString(l)
+ values := sample.NumLabel[l]
+ putNumber(uint64(len(values)))
+ for _, v := range values {
+ putNumber(uint64(v))
+ }
+ units := sample.NumUnit[l]
+ putNumber(uint64(len(units)))
+ for _, v := range units {
+ putDelimitedString(v)
+ }
}
- sort.Strings(numlabels)
- return sampleKey{
- strings.Join(ids, "|"),
- strings.Join(labels, ""),
- strings.Join(numlabels, ""),
+ return sampleKey(buf.String())
+}
+
+type sampleKey string
+
+// sortedKeys1 returns the sorted keys found in a string->[]string map.
+//
+// Note: this is currently non-generic since github pprof runs golint,
+// which does not support generics. When that issue is fixed, it can
+// be merged with sortedKeys2 and made into a generic function.
+func sortedKeys1(m map[string][]string) []string {
+ if len(m) == 0 {
+ return nil
}
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
}
-type sampleKey struct {
- locations string
- labels string
- numlabels string
+// sortedKeys2 returns the sorted keys found in a string->[]int64 map.
+//
+// Note: this is currently non-generic since github pprof runs golint,
+// which does not support generics. When that issue is fixed, it can
+// be merged with sortedKeys1 and made into a generic function.
+func sortedKeys2(m map[string][]int64) []string {
+ if len(m) == 0 {
+ return nil
+ }
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
}
func (pm *profileMerger) mapLocation(src *Location) *Location {
@@ -230,7 +287,7 @@ func (pm *profileMerger) mapLocation(src *Location) *Location {
return nil
}
- if l, ok := pm.locationsByID[src.ID]; ok {
+ if l := pm.locationsByID.get(src.ID); l != nil {
return l
}
@@ -249,10 +306,10 @@ func (pm *profileMerger) mapLocation(src *Location) *Location {
// account for the remapped mapping ID.
k := l.key()
if ll, ok := pm.locations[k]; ok {
- pm.locationsByID[src.ID] = ll
+ pm.locationsByID.set(src.ID, ll)
return ll
}
- pm.locationsByID[src.ID] = l
+ pm.locationsByID.set(src.ID, l)
pm.locations[k] = l
pm.p.Location = append(pm.p.Location, l)
return l
@@ -269,12 +326,13 @@ func (l *Location) key() locationKey {
key.addr -= l.Mapping.Start
key.mappingID = l.Mapping.ID
}
- lines := make([]string, len(l.Line)*2)
+ lines := make([]string, len(l.Line)*3)
for i, line := range l.Line {
if line.Function != nil {
lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
}
lines[i*2+1] = strconv.FormatInt(line.Line, 16)
+ lines[i*2+2] = strconv.FormatInt(line.Column, 16)
}
key.lines = strings.Join(lines, "|")
return key
@@ -303,16 +361,17 @@ func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
return mi
}
m := &Mapping{
- ID: uint64(len(pm.p.Mapping) + 1),
- Start: src.Start,
- Limit: src.Limit,
- Offset: src.Offset,
- File: src.File,
- BuildID: src.BuildID,
- HasFunctions: src.HasFunctions,
- HasFilenames: src.HasFilenames,
- HasLineNumbers: src.HasLineNumbers,
- HasInlineFrames: src.HasInlineFrames,
+ ID: uint64(len(pm.p.Mapping) + 1),
+ Start: src.Start,
+ Limit: src.Limit,
+ Offset: src.Offset,
+ File: src.File,
+ KernelRelocationSymbol: src.KernelRelocationSymbol,
+ BuildID: src.BuildID,
+ HasFunctions: src.HasFunctions,
+ HasFilenames: src.HasFilenames,
+ HasLineNumbers: src.HasLineNumbers,
+ HasInlineFrames: src.HasInlineFrames,
}
pm.p.Mapping = append(pm.p.Mapping, m)
@@ -360,6 +419,7 @@ func (pm *profileMerger) mapLine(src Line) Line {
ln := Line{
Function: pm.mapFunction(src.Function),
Line: src.Line,
+ Column: src.Column,
}
return ln
}
@@ -416,6 +476,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
var timeNanos, durationNanos, period int64
var comments []string
seenComments := map[string]bool{}
+ var docURL string
var defaultSampleType string
for _, s := range srcs {
if timeNanos == 0 || s.TimeNanos < timeNanos {
@@ -434,6 +495,9 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
if defaultSampleType == "" {
defaultSampleType = s.DefaultSampleType
}
+ if docURL == "" {
+ docURL = s.DocURL
+ }
}
p := &Profile{
@@ -449,6 +513,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
Comments: comments,
DefaultSampleType: defaultSampleType,
+ DocURL: docURL,
}
copy(p.SampleType, srcs[0].SampleType)
return p, nil
@@ -479,3 +544,131 @@ func (p *Profile) compatible(pb *Profile) error {
func equalValueType(st1, st2 *ValueType) bool {
return st1.Type == st2.Type && st1.Unit == st2.Unit
}
+
+// locationIDMap is like a map[uint64]*Location, but provides efficiency for
+// ids that are densely numbered, which is often the case.
+type locationIDMap struct {
+ dense []*Location // indexed by id for id < len(dense)
+ sparse map[uint64]*Location // indexed by id for id >= len(dense)
+}
+
+func makeLocationIDMap(n int) locationIDMap {
+ return locationIDMap{
+ dense: make([]*Location, n),
+ sparse: map[uint64]*Location{},
+ }
+}
+
+func (lm locationIDMap) get(id uint64) *Location {
+ if id < uint64(len(lm.dense)) {
+ return lm.dense[int(id)]
+ }
+ return lm.sparse[id]
+}
+
+func (lm locationIDMap) set(id uint64, loc *Location) {
+ if id < uint64(len(lm.dense)) {
+ lm.dense[id] = loc
+ return
+ }
+ lm.sparse[id] = loc
+}
+
+// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It
+// keeps sample types that appear in all profiles only and drops/reorders the
+// sample types as necessary.
+//
+// In the case of sample types order is not the same for given profiles the
+// order is derived from the first profile.
+//
+// Profiles are modified in-place.
+//
+// It returns an error if the sample type's intersection is empty.
+func CompatibilizeSampleTypes(ps []*Profile) error {
+ sTypes := commonSampleTypes(ps)
+ if len(sTypes) == 0 {
+ return fmt.Errorf("profiles have empty common sample type list")
+ }
+ for _, p := range ps {
+ if err := compatibilizeSampleTypes(p, sTypes); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// commonSampleTypes returns sample types that appear in all profiles in the
+// order how they ordered in the first profile.
+func commonSampleTypes(ps []*Profile) []string {
+ if len(ps) == 0 {
+ return nil
+ }
+ sTypes := map[string]int{}
+ for _, p := range ps {
+ for _, st := range p.SampleType {
+ sTypes[st.Type]++
+ }
+ }
+ var res []string
+ for _, st := range ps[0].SampleType {
+ if sTypes[st.Type] == len(ps) {
+ res = append(res, st.Type)
+ }
+ }
+ return res
+}
+
+// compatibilizeSampleTypes drops sample types that are not present in sTypes
+// list and reorder them if needed.
+//
+// It sets DefaultSampleType to sType[0] if it is not in sType list.
+//
+// It assumes that all sample types from the sTypes list are present in the
+// given profile otherwise it returns an error.
+func compatibilizeSampleTypes(p *Profile, sTypes []string) error {
+ if len(sTypes) == 0 {
+ return fmt.Errorf("sample type list is empty")
+ }
+ defaultSampleType := sTypes[0]
+ reMap, needToModify := make([]int, len(sTypes)), false
+ for i, st := range sTypes {
+ if st == p.DefaultSampleType {
+ defaultSampleType = p.DefaultSampleType
+ }
+ idx := searchValueType(p.SampleType, st)
+ if idx < 0 {
+ return fmt.Errorf("%q sample type is not found in profile", st)
+ }
+ reMap[i] = idx
+ if idx != i {
+ needToModify = true
+ }
+ }
+ if !needToModify && len(sTypes) == len(p.SampleType) {
+ return nil
+ }
+ p.DefaultSampleType = defaultSampleType
+ oldSampleTypes := p.SampleType
+ p.SampleType = make([]*ValueType, len(sTypes))
+ for i, idx := range reMap {
+ p.SampleType[i] = oldSampleTypes[idx]
+ }
+ values := make([]int64, len(sTypes))
+ for _, s := range p.Sample {
+ for i, idx := range reMap {
+ values[i] = s.Value[idx]
+ }
+ s.Value = s.Value[:len(values)]
+ copy(s.Value, values)
+ }
+ return nil
+}
+
+func searchValueType(vts []*ValueType, s string) int {
+ for i, vt := range vts {
+ if vt.Type == s {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go
index 2590c8ddb4..f47a243903 100644
--- a/vendor/github.com/google/pprof/profile/profile.go
+++ b/vendor/github.com/google/pprof/profile/profile.go
@@ -21,7 +21,6 @@ import (
"compress/gzip"
"fmt"
"io"
- "io/ioutil"
"math"
"path/filepath"
"regexp"
@@ -40,6 +39,7 @@ type Profile struct {
Location []*Location
Function []*Function
Comments []string
+ DocURL string
DropFrames string
KeepFrames string
@@ -54,6 +54,7 @@ type Profile struct {
encodeMu sync.Mutex
commentX []int64
+ docURLX int64
dropFramesX int64
keepFramesX int64
stringTable []string
@@ -73,9 +74,23 @@ type ValueType struct {
type Sample struct {
Location []*Location
Value []int64
- Label map[string][]string
+ // Label is a per-label-key map to values for string labels.
+ //
+ // In general, having multiple values for the given label key is strongly
+ // discouraged - see docs for the sample label field in profile.proto. The
+ // main reason this unlikely state is tracked here is to make the
+ // decoding->encoding roundtrip not lossy. But we expect that the value
+ // slices present in this map are always of length 1.
+ Label map[string][]string
+ // NumLabel is a per-label-key map to values for numeric labels. See a note
+ // above on handling multiple values for a label.
NumLabel map[string][]int64
- NumUnit map[string][]string
+ // NumUnit is a per-label-key map to the unit names of corresponding numeric
+ // label values. The unit info may be missing even if the label is in
+ // NumLabel, see the docs in profile.proto for details. When the value is
+ // slice is present and not nil, its length must be equal to the length of
+ // the corresponding value slice in NumLabel.
+ NumUnit map[string][]string
locationIDX []uint64
labelX []label
@@ -106,6 +121,15 @@ type Mapping struct {
fileX int64
buildIDX int64
+
+ // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File.
+ // For linux kernel mappings generated by some tools, correct symbolization depends
+ // on knowing which of the two possible relocation symbols was used for `Start`.
+ // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext").
+ //
+ // Note, this public field is not persisted in the proto. For the purposes of
+ // copying / merging / hashing profiles, it is considered subsumed by `File`.
+ KernelRelocationSymbol string
}
// Location corresponds to Profile.Location
@@ -123,6 +147,7 @@ type Location struct {
type Line struct {
Function *Function
Line int64
+ Column int64
functionIDX uint64
}
@@ -144,7 +169,7 @@ type Function struct {
// may be a gzip-compressed encoded protobuf or one of many legacy
// profile formats which may be unsupported in the future.
func Parse(r io.Reader) (*Profile, error) {
- data, err := ioutil.ReadAll(r)
+ data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
@@ -159,7 +184,7 @@ func ParseData(data []byte) (*Profile, error) {
if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err == nil {
- data, err = ioutil.ReadAll(gz)
+ data, err = io.ReadAll(gz)
}
if err != nil {
return nil, fmt.Errorf("decompressing profile: %v", err)
@@ -414,7 +439,7 @@ func (p *Profile) CheckValid() error {
// Aggregate merges the locations in the profile into equivalence
// classes preserving the request attributes. It also updates the
// samples to point to the merged locations.
-func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
+func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error {
for _, m := range p.Mapping {
m.HasInlineFrames = m.HasInlineFrames && inlineFrame
m.HasFunctions = m.HasFunctions && function
@@ -436,7 +461,7 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address
}
// Aggregate locations
- if !inlineFrame || !address || !linenumber {
+ if !inlineFrame || !address || !linenumber || !columnnumber {
for _, l := range p.Location {
if !inlineFrame && len(l.Line) > 1 {
l.Line = l.Line[len(l.Line)-1:]
@@ -444,6 +469,12 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address
if !linenumber {
for i := range l.Line {
l.Line[i].Line = 0
+ l.Line[i].Column = 0
+ }
+ }
+ if !columnnumber {
+ for i := range l.Line {
+ l.Line[i].Column = 0
}
}
if !address {
@@ -526,6 +557,9 @@ func (p *Profile) String() string {
for _, c := range p.Comments {
ss = append(ss, "Comment: "+c)
}
+ if url := p.DocURL; url != "" {
+ ss = append(ss, fmt.Sprintf("Doc: %s", url))
+ }
if pt := p.PeriodType; pt != nil {
ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
}
@@ -605,10 +639,11 @@ func (l *Location) string() string {
for li := range l.Line {
lnStr := "??"
if fn := l.Line[li].Function; fn != nil {
- lnStr = fmt.Sprintf("%s %s:%d s=%d",
+ lnStr = fmt.Sprintf("%s %s:%d:%d s=%d",
fn.Name,
fn.Filename,
l.Line[li].Line,
+ l.Line[li].Column,
fn.StartLine)
if fn.Name != fn.SystemName {
lnStr = lnStr + "(" + fn.SystemName + ")"
@@ -707,6 +742,35 @@ func (s *Sample) HasLabel(key, value string) bool {
return false
}
+// SetNumLabel sets the specified key to the specified value for all samples in the
+// profile. "unit" is a slice that describes the units that each corresponding member
+// of "values" is measured in (e.g. bytes or seconds). If there is no relevant
+// unit for a given value, that member of "unit" should be the empty string.
+// "unit" must either have the same length as "value", or be nil.
+func (p *Profile) SetNumLabel(key string, value []int64, unit []string) {
+ for _, sample := range p.Sample {
+ if sample.NumLabel == nil {
+ sample.NumLabel = map[string][]int64{key: value}
+ } else {
+ sample.NumLabel[key] = value
+ }
+ if sample.NumUnit == nil {
+ sample.NumUnit = map[string][]string{key: unit}
+ } else {
+ sample.NumUnit[key] = unit
+ }
+ }
+}
+
+// RemoveNumLabel removes all numerical labels associated with the specified key for all
+// samples in the profile.
+func (p *Profile) RemoveNumLabel(key string) {
+ for _, sample := range p.Sample {
+ delete(sample.NumLabel, key)
+ delete(sample.NumUnit, key)
+ }
+}
+
// DiffBaseSample returns true if a sample belongs to the diff base and false
// otherwise.
func (s *Sample) DiffBaseSample() bool {
@@ -785,10 +849,10 @@ func (p *Profile) HasFileLines() bool {
// Unsymbolizable returns true if a mapping points to a binary for which
// locations can't be symbolized in principle, at least now. Examples are
-// "[vdso]", [vsyscall]" and some others, see the code.
+// "[vdso]", "[vsyscall]" and some others, see the code.
func (m *Mapping) Unsymbolizable() bool {
name := filepath.Base(m.File)
- return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
+ return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon"
}
// Copy makes a fully independent copy of a profile.
diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go
index 539ad3ab33..a15696ba16 100644
--- a/vendor/github.com/google/pprof/profile/proto.go
+++ b/vendor/github.com/google/pprof/profile/proto.go
@@ -39,11 +39,12 @@ import (
)
type buffer struct {
- field int // field tag
- typ int // proto wire type code for field
- u64 uint64
- data []byte
- tmp [16]byte
+ field int // field tag
+ typ int // proto wire type code for field
+ u64 uint64
+ data []byte
+ tmp [16]byte
+ tmpLines []Line // temporary storage used while decoding "repeated Line".
}
type decoder func(*buffer, message) error
@@ -286,7 +287,6 @@ func decodeInt64s(b *buffer, x *[]int64) error {
if b.typ == 2 {
// Packed encoding
data := b.data
- tmp := make([]int64, 0, len(data)) // Maximally sized
for len(data) > 0 {
var u uint64
var err error
@@ -294,9 +294,8 @@ func decodeInt64s(b *buffer, x *[]int64) error {
if u, data, err = decodeVarint(data); err != nil {
return err
}
- tmp = append(tmp, int64(u))
+ *x = append(*x, int64(u))
}
- *x = append(*x, tmp...)
return nil
}
var i int64
@@ -319,7 +318,6 @@ func decodeUint64s(b *buffer, x *[]uint64) error {
if b.typ == 2 {
data := b.data
// Packed encoding
- tmp := make([]uint64, 0, len(data)) // Maximally sized
for len(data) > 0 {
var u uint64
var err error
@@ -327,9 +325,8 @@ func decodeUint64s(b *buffer, x *[]uint64) error {
if u, data, err = decodeVarint(data); err != nil {
return err
}
- tmp = append(tmp, u)
+ *x = append(*x, u)
}
- *x = append(*x, tmp...)
return nil
}
var u uint64
diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go
index 02d21a8184..b2f9fd5466 100644
--- a/vendor/github.com/google/pprof/profile/prune.go
+++ b/vendor/github.com/google/pprof/profile/prune.go
@@ -62,15 +62,31 @@ func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
prune := make(map[uint64]bool)
pruneBeneath := make(map[uint64]bool)
+ // simplifyFunc can be expensive, so cache results.
+ // Note that the same function name can be encountered many times due
+ // different lines and addresses in the same function.
+ pruneCache := map[string]bool{} // Map from function to whether or not to prune
+ pruneFromHere := func(s string) bool {
+ if r, ok := pruneCache[s]; ok {
+ return r
+ }
+ funcName := simplifyFunc(s)
+ if dropRx.MatchString(funcName) {
+ if keepRx == nil || !keepRx.MatchString(funcName) {
+ pruneCache[s] = true
+ return true
+ }
+ }
+ pruneCache[s] = false
+ return false
+ }
+
for _, loc := range p.Location {
var i int
for i = len(loc.Line) - 1; i >= 0; i-- {
if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
- funcName := simplifyFunc(fn.Name)
- if dropRx.MatchString(funcName) {
- if keepRx == nil || !keepRx.MatchString(funcName) {
- break
- }
+ if pruneFromHere(fn.Name) {
+ break
}
}
}
diff --git a/vendor/github.com/moby/spdystream/connection.go b/vendor/github.com/moby/spdystream/connection.go
index d906bb05ce..1394d0ad4c 100644
--- a/vendor/github.com/moby/spdystream/connection.go
+++ b/vendor/github.com/moby/spdystream/connection.go
@@ -208,9 +208,10 @@ type Connection struct {
nextStreamId spdy.StreamId
receivedStreamId spdy.StreamId
- pingIdLock sync.Mutex
- pingId uint32
- pingChans map[uint32]chan error
+ // pingLock protects pingChans and pingId
+ pingLock sync.Mutex
+ pingId uint32
+ pingChans map[uint32]chan error
shutdownLock sync.Mutex
shutdownChan chan error
@@ -274,16 +275,20 @@ func NewConnection(conn net.Conn, server bool) (*Connection, error) {
// returns the response time
func (s *Connection) Ping() (time.Duration, error) {
pid := s.pingId
- s.pingIdLock.Lock()
+ s.pingLock.Lock()
if s.pingId > 0x7ffffffe {
s.pingId = s.pingId - 0x7ffffffe
} else {
s.pingId = s.pingId + 2
}
- s.pingIdLock.Unlock()
pingChan := make(chan error)
s.pingChans[pid] = pingChan
- defer delete(s.pingChans, pid)
+ s.pingLock.Unlock()
+ defer func() {
+ s.pingLock.Lock()
+ delete(s.pingChans, pid)
+ s.pingLock.Unlock()
+ }()
frame := &spdy.PingFrame{Id: pid}
startTime := time.Now()
@@ -612,10 +617,14 @@ func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error {
}
func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error {
- if s.pingId&0x01 != frame.Id&0x01 {
+ s.pingLock.Lock()
+ pingId := s.pingId
+ pingChan, pingOk := s.pingChans[frame.Id]
+ s.pingLock.Unlock()
+
+ if pingId&0x01 != frame.Id&0x01 {
return s.framer.WriteFrame(frame)
}
- pingChan, pingOk := s.pingChans[frame.Id]
if pingOk {
close(pingChan)
}
@@ -703,7 +712,9 @@ func (s *Connection) shutdown(closeTimeout time.Duration) {
var timeout <-chan time.Time
if closeTimeout > time.Duration(0) {
- timeout = time.After(closeTimeout)
+ timer := time.NewTimer(closeTimeout)
+ defer timer.Stop()
+ timeout = timer.C
}
streamsClosed := make(chan bool)
@@ -730,17 +741,23 @@ func (s *Connection) shutdown(closeTimeout time.Duration) {
}
if err != nil {
- duration := 10 * time.Minute
- time.AfterFunc(duration, func() {
- select {
- case err, ok := <-s.shutdownChan:
- if ok {
- debugMessage("Unhandled close error after %s: %s", duration, err)
- }
- default:
- }
- })
- s.shutdownChan <- err
+ // default to 1 second
+ duration := time.Second
+ // if a closeTimeout was given, use that, clipped to 1s-10m
+ if closeTimeout > time.Second {
+ duration = closeTimeout
+ }
+ if duration > 10*time.Minute {
+ duration = 10 * time.Minute
+ }
+ timer := time.NewTimer(duration)
+ defer timer.Stop()
+ select {
+ case s.shutdownChan <- err:
+ // error was handled
+ case <-timer.C:
+ debugMessage("Unhandled close error after %s: %s", duration, err)
+ }
}
close(s.shutdownChan)
}
@@ -799,7 +816,9 @@ func (s *Connection) CloseWait() error {
func (s *Connection) Wait(waitTimeout time.Duration) error {
var timeout <-chan time.Time
if waitTimeout > time.Duration(0) {
- timeout = time.After(waitTimeout)
+ timer := time.NewTimer(waitTimeout)
+ defer timer.Stop()
+ timeout = timer.C
}
select {
diff --git a/vendor/github.com/moby/spdystream/stream.go b/vendor/github.com/moby/spdystream/stream.go
index 404e3c02df..171c1e9e33 100644
--- a/vendor/github.com/moby/spdystream/stream.go
+++ b/vendor/github.com/moby/spdystream/stream.go
@@ -305,6 +305,8 @@ func (s *Stream) Identifier() uint32 {
// IsFinished returns whether the stream has finished
// sending data
func (s *Stream) IsFinished() bool {
+ s.finishLock.Lock()
+ defer s.finishLock.Unlock()
return s.finished
}
diff --git a/vendor/github.com/modern-go/reflect2/safe_type.go b/vendor/github.com/modern-go/reflect2/safe_type.go
index ee4e7bb6ed..5646309e09 100644
--- a/vendor/github.com/modern-go/reflect2/safe_type.go
+++ b/vendor/github.com/modern-go/reflect2/safe_type.go
@@ -6,10 +6,12 @@ import (
)
type safeType struct {
- reflect.Type
- cfg *frozenConfig
+ Type reflect.Type
+ cfg *frozenConfig
}
+var _ Type = &safeType{}
+
func (type2 *safeType) New() interface{} {
return reflect.New(type2.Type).Interface()
}
@@ -18,6 +20,22 @@ func (type2 *safeType) UnsafeNew() unsafe.Pointer {
panic("does not support unsafe operation")
}
+func (type2 *safeType) Kind() reflect.Kind {
+ return type2.Type.Kind()
+}
+
+func (type2 *safeType) Len() int {
+ return type2.Type.Len()
+}
+
+func (type2 *safeType) NumField() int {
+ return type2.Type.NumField()
+}
+
+func (type2 *safeType) String() string {
+ return type2.Type.String()
+}
+
func (type2 *safeType) Elem() Type {
return type2.cfg.Type2(type2.Type.Elem())
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/.gitignore b/vendor/github.com/onsi/ginkgo/v2/.gitignore
index 18793c248a..6faaaf3155 100644
--- a/vendor/github.com/onsi/ginkgo/v2/.gitignore
+++ b/vendor/github.com/onsi/ginkgo/v2/.gitignore
@@ -4,4 +4,5 @@ tmp/**/*
*.coverprofile
.vscode
.idea/
-*.log
\ No newline at end of file
+*.log
+*.test
\ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
index fea67526e0..0921794114 100644
--- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
@@ -1,3 +1,395 @@
+## 2.27.2
+
+### Fixes
+- inline automaxprocs to simplify dependencies; this will be removed when Go 1.26 comes out [a69113a]
+
+### Maintenance
+- Fix syntax errors and typo [a99c6e0]
+- Fix paragraph position error [f993df5]
+
+## 2.27.1
+
+### Fixes
+- Fix Ginkgo Reporter slice-bounds panic [606c1cb]
+- Bug Fix: Add GinkoTBWrapper.Attr() and GinkoTBWrapper.Output() [a6463b3]
+
+## 2.27.0
+
+### Features
+
+#### Transforming Nodes during Tree Construction
+
+This release adds support for `NodeArgsTransformer`s that can be registered with `AddTreeConstructionNodeArgsTransformer`.
+
+These are called during the tree construction phase as nodes are constructed and can modify the node strings and decorators. This enables frameworks built on top of Ginkgo to modify Ginkgo nodes and enforce conventions.
+
+Learn more [here](https://onsi.github.io/ginkgo/#advanced-transforming-node-arguments-during-tree-construction).
+
+#### Spec Prioritization
+
+A new `SpecPriority(int)` decorator has been added. Ginkgo will honor priority when ordering specs, ensuring that higher priority specs start running before lower priority specs
+
+Learn more [here](https://onsi.github.io/ginkgo/#prioritizing-specs).
+
+### Maintenance
+- Bump rexml from 3.4.0 to 3.4.2 in /docs (#1595) [1333dae]
+- Bump github.com/gkampitakis/go-snaps from 0.5.14 to 0.5.15 (#1600) [17ae63e]
+
+## 2.26.0
+
+### Features
+
+Ginkgo can now generate json-formatted reports that are compatible with the `go test` json format. Use `ginkgo --gojson-report=report.go.json`. This is not intended to be a replacement for Ginkgo's native json format which is more information rich and better models Ginkgo's test structure semantics.
+
+## 2.25.3
+
+### Fixes
+
+- emit --github-output group only for progress report itself [f01aed1]
+
+## 2.25.2
+
+### Fixes
+Add github output group for progress report content
+
+### Maintenance
+Bump Gomega
+
+## 2.25.1
+
+### Fixes
+- fix(types): ignore nameless nodes on FullText() [10866d3]
+- chore: fix some CodeQL warnings [2e42cff]
+
+## 2.25.0
+
+### `AroundNode`
+
+This release introduces a new decorator to support more complex spec setup usecases.
+
+`AroundNode` registers a function that runs before each individual node. This is considered a more advanced decorator.
+
+Please read the [docs](https://onsi.github.io/ginkgo/#advanced-around-node) for more information and some examples.
+
+Allowed signatures:
+
+- `AroundNode(func())` - `func` will be called before the node is run.
+- `AroundNode(func(ctx context.Context) context.Context)` - `func` can wrap the passed in context and return a new one which will be passed on to the node.
+- `AroundNode(func(ctx context.Context, body func(ctx context.Context)))` - `ctx` is the context for the node and `body` is a function that must be called to run the node. This gives you complete control over what runs before and after the node.
+
+Multiple `AroundNode` decorators can be applied to a single node and they will run in the order they are applied.
+
+Unlike setup nodes like `BeforeEach` and `DeferCleanup`, `AroundNode` is guaranteed to run in the same goroutine as the decorated node. This is necessary when working with lower-level libraries that must run on a single thread (you can call `runtime.LockOSThread()` in the `AroundNode` to ensure that the node runs on a single thread).
+
+Since `AroundNode` allows you to modify the context you can also use `AroundNode` to implement shared setup that attaches values to the context.
+
+If applied to a container, `AroundNode` will run before every node in the container. Including setup nodes like `BeforeEach` and `DeferCleanup`.
+
+`AroundNode` can also be applied to `RunSpecs` to run before every node in the suite. This opens up new mechanisms for instrumenting individual nodes across an entire suite.
+
+## 2.24.0
+
+### Features
+
+Specs can now be decorated with (e.g.) `SemVerConstraint("2.1.0")` and `ginkgo --sem-ver-filter="2.1.1"` will only run constrained specs that match the requested version. Learn more in the docs [here](https://onsi.github.io/ginkgo/#spec-semantic-version-filtering)! Thanks to @Icarus9913 for the PR.
+
+### Fixes
+
+- remove -o from run command [3f5d379]. fixes [#1582](https://github.com/onsi/ginkgo/issues/1582)
+
+### Maintenance
+
+Numerous dependency bumps and documentation fixes
+
+## 2.23.4
+
+Prior to this release Ginkgo would compute the incorrect number of available CPUs when running with `-p` in a linux container. Thanks to @emirot for the fix!
+
+### Features
+- Add automaxprocs for using CPUQuota [2b9c428]
+
+### Fixes
+- clarify gotchas about -vet flag [1f59d07]
+
+### Maintenance
+- bump dependencies [2d134d5]
+
+## 2.23.3
+
+### Fixes
+
+- allow `-` as a standalone argument [cfcc1a5]
+- Bug Fix: Add GinkoTBWrapper.Chdir() and GinkoTBWrapper.Context() [feaf292]
+- ignore exit code for symbol test on linux [88e2282]
+
+## 2.23.2
+
+🎉🎉🎉
+
+At long last, some long-standing performance gaps between `ginkgo` and `go test` have been resolved!
+
+Ginkgo operates by running `go test -c` to generate test binaries, and then running those binaries. It turns out that the compilation step of `go test -c` is slower than `go test`'s compilation step because `go test` strips out debug symbols (`ldflags=-w`) whereas `go test -c` does not.
+
+Ginkgo now passes the appropriate `ldflags` to `go test -c` when running specs to strip out symbols. This is only done when it is safe to do so and symbols are preferred when profiling is enabled and when `ginkgo build` is called explicitly.
+
+This, coupled, with the [instructions for disabling XProtect on MacOS](https://onsi.github.io/ginkgo/#if-you-are-running-on-macos) yields a much better performance experience with Ginkgo.
+
+## 2.23.1
+
+## 🚨 For users on MacOS 🚨
+
+A long-standing Ginkgo performance issue on MacOS seems to be due to mac's antimalware XProtect. You can follow the instructions [here](https://onsi.github.io/ginkgo/#if-you-are-running-on-macos) to disable it in your terminal. Doing so sped up Ginkgo's own test suite from 1m8s to 47s.
+
+### Fixes
+
+Ginkgo's CLI is now a bit clearer if you pass flags in incorrectly:
+
+- make it clearer that you need to pass a filename to the various profile flags, not an absolute directory [a0e52ff]
+- emit an error and exit if the ginkgo invocation includes flags after positional arguments [b799d8d]
+
+This might cause existing CI builds to fail. If so then it's likely that your CI build was misconfigured and should be corrected. Open an issue if you need help.
+
+## 2.23.0
+
+Ginkgo 2.23.0 adds a handful of methods to `GinkgoT()` to make it compatible with the `testing.TB` interface in Go 1.24. `GinkgoT().Context()`, in particular, is a useful shorthand for generating a new context that will clean itself up in a `DeferCleanup()`. This has subtle behavior differences from the golang implementation but should make sense in a Ginkgo... um... context.
+
+### Features
+- bump to go 1.24.0 - support new testing.TB methods and add a test to cover testing.TB regressions [37a511b]
+
+### Fixes
+- fix edge case where build -o is pointing at an explicit file, not a directory [7556a86]
+- Fix binary paths when precompiling multiple suites. [4df06c6]
+
+### Maintenance
+- Fix: Correct Markdown list rendering in MIGRATING_TO_V2.md [cbcf39a]
+- docs: fix test workflow badge (#1512) [9b261ff]
+- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1516) [00f19c8]
+- Bump golang.org/x/tools from 0.28.0 to 0.30.0 (#1515) [e98a4df]
+- Bump activesupport from 6.0.6.1 to 6.1.7.5 in /docs (#1504) [60cc4e2]
+- Bump github-pages from 231 to 232 in /docs (#1447) [fea6f2d]
+- Bump rexml from 3.2.8 to 3.3.9 in /docs (#1497) [31d7813]
+- Bump webrick from 1.8.1 to 1.9.1 in /docs (#1501) [fc3bbd6]
+- Code linting (#1500) [aee0d56]
+- change interface{} to any (#1502) [809a710]
+
+## 2.22.2
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.36.1 to 1.36.2 (#1499) [cc553ce]
+- Bump golang.org/x/crypto (#1498) [2170370]
+- Bump golang.org/x/net from 0.32.0 to 0.33.0 (#1496) [a96c44f]
+
+## 2.22.1
+
+### Fixes
+Fix CSV encoding
+- Update tests [aab3da6]
+- Properly encode CSV rows [c09df39]
+- Add test case for proper csv escaping [96a80fc]
+- Add meta-test [43dad69]
+
+### Maintenance
+- ensure *.test files are gitignored so we don't accidentally commit compiled tests again [c88c634]
+- remove golang.org/x/net/context in favour of stdlib context [4df44bf]
+
+## 2.22.0
+
+### Features
+- Add label to serial nodes [0fcaa08]
+
+This allows serial tests to be filtered using the `label-filter`
+
+### Maintenance
+Various doc fixes
+
+## 2.21.0
+
+
+ ### Features
+ - add support for GINKGO_TIME_FORMAT [a69eb39]
+ - add GINKGO_NO_COLOR to disable colors via environment variables [bcab9c8]
+
+ ### Fixes
+ - increase threshold in timeline matcher [e548367]
+ - Fix the document by replacing `SpecsThatWillBeRun` with `SpecsThatWillRun`
+ [c2c4d3c]
+
+ ### Maintenance
+ - bump various dependencies [7e65a00]
+
+## 2.20.2
+
+Require Go 1.22+
+
+### Maintenance
+- bump go to v1.22 [a671816]
+
+## 2.20.1
+
+### Fixes
+- make BeSpecEvent duration matcher more forgiving [d6f9640]
+
+## 2.20.0
+
+### Features
+- Add buildvcs flag [be5ab95]
+
+### Maintenance
+- Add update-deps to makefile [d303d14]
+- bump all dependencies [7a50221]
+
+## 2.19.1
+
+### Fixes
+- update supported platforms for race conditions [63c8c30]
+- [build] Allow custom name for binaries. [ff41e27]
+
+### Maintenance
+- bump gomega [76f4e0c]
+- Bump rexml from 3.2.6 to 3.2.8 in /docs (#1417) [b69c00d]
+- Bump golang.org/x/sys from 0.20.0 to 0.21.0 (#1425) [f097741]
+
+## 2.19.0
+
+### Features
+
+[Label Sets](https://onsi.github.io/ginkgo/#label-sets) allow for more expressive and flexible label filtering.
+
+## 2.18.0
+
+### Features
+- Add --slience-skips and --force-newlines [f010b65]
+- fail when no tests were run and --fail-on-empty was set [d80eebe]
+
+### Fixes
+- Fix table entry context edge case [42013d6]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.20.0 to 0.21.0 (#1406) [fcf1fd7]
+- Bump github.com/onsi/gomega from 1.33.0 to 1.33.1 (#1399) [8bb14fd]
+- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#1407) [04bfad7]
+
+## 2.17.3
+
+### Fixes
+`ginkgo watch` now ignores hidden files [bde6e00]
+
+## 2.17.2
+
+### Fixes
+- fix: close files [32259c8]
+- fix github output log level for skipped specs [780e7a3]
+
+### Maintenance
+- Bump github.com/google/pprof [d91fe4e]
+- Bump github.com/go-task/slim-sprig to v3 [8cb662e]
+- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1391) [3134422]
+- Bump github-pages from 230 to 231 in /docs (#1384) [eca81b4]
+- Bump golang.org/x/tools from 0.19.0 to 0.20.0 (#1383) [760def8]
+- Bump golang.org/x/net from 0.23.0 to 0.24.0 (#1381) [4ce33f4]
+- Fix test for gomega version bump [f2fcd97]
+- Bump github.com/onsi/gomega from 1.30.0 to 1.33.0 (#1390) [fd622d2]
+- Bump golang.org/x/tools from 0.17.0 to 0.19.0 (#1368) [5474a26]
+- Bump github-pages from 229 to 230 in /docs (#1359) [e6d1170]
+- Bump google.golang.org/protobuf from 1.28.0 to 1.33.0 (#1374) [7f447b2]
+- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#1380) [f15239a]
+
+## 2.17.1
+
+### Fixes
+- If the user sets --seed=0, make sure all parallel nodes get the same seed [af0330d]
+
+## 2.17.0
+
+### Features
+
+- add `--github-output` for nicer output in github actions [e8a2056]
+
+### Maintenance
+
+- fix typo in core_dsl.go [977bc6f]
+- Fix typo in docs [e297e7b]
+
+## 2.16.0
+
+### Features
+- add SpecContext to reporting nodes
+
+### Fixes
+- merge coverages instead of combining them (#1329) (#1340) [23f0cc5]
+- core_dsl: disable Getwd() with environment variable (#1357) [cd418b7]
+
+### Maintenance
+- docs/index.md: Typo [2cebe8d]
+- fix docs [06de431]
+- chore: test with Go 1.22 (#1352) [898cba9]
+- Bump golang.org/x/tools from 0.16.1 to 0.17.0 (#1336) [17ae120]
+- Bump golang.org/x/sys from 0.15.0 to 0.16.0 (#1327) [5a179ed]
+- Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#1321) [a1e6b69]
+- Bump github-pages and jekyll-feed in /docs (#1351) [d52951d]
+- Fix docs for handling failures in goroutines (#1339) [4471b2e]
+
+## 2.15.0
+
+### Features
+
+- JUnit reports now interpret Label(owner:X) and set owner to X. [8f3bd70]
+- include cancellation reason when cancelling spec context [96e915c]
+
+### Fixes
+
+- emit output of failed go tool cover invocation so users can try to debug things for themselves [c245d09]
+- fix outline when using nodot in ginkgo v2 [dca77c8]
+- Document areas where GinkgoT() behaves differently from testing.T [dbaf18f]
+- bugfix(docs): use Unsetenv instead of Clearenv (#1337) [6f67a14]
+
+### Maintenance
+
+- Bump to go 1.20 [4fcd0b3]
+
+## 2.14.0
+
+### Features
+You can now use `GinkgoTB()` when you need an instance of `testing.TB` to pass to a library.
+
+Prior to this release table testing only supported generating individual `It`s for each test entry. `DescribeTableSubtree` extends table testing support to entire testing subtrees - under the hood `DescrieTableSubtree` generates a new container for each entry and invokes your function to fill our the container. See the [docs](https://onsi.github.io/ginkgo/#generating-subtree-tables) to learn more.
+
+- Introduce DescribeTableSubtree [65ec56d]
+- add GinkgoTB() to docs [4a2c832]
+- Add GinkgoTB() function (#1333) [92b6744]
+
+### Fixes
+- Fix typo in internal/suite.go (#1332) [beb9507]
+- Fix typo in docs/index.md (#1319) [4ac3a13]
+- allow wasm to compile with ginkgo present (#1311) [b2e5bc5]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.16.0 to 0.16.1 (#1316) [465a8ec]
+- Bump actions/setup-go from 4 to 5 (#1313) [eab0e40]
+- Bump github/codeql-action from 2 to 3 (#1317) [fbf9724]
+- Bump golang.org/x/crypto (#1318) [3ee80ee]
+- Bump golang.org/x/tools from 0.14.0 to 0.16.0 (#1306) [123e1d5]
+- Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 (#1297) [558f6e0]
+- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#1307) [84ff7f3]
+
+## 2.13.2
+
+### Fixes
+- Fix file handler leak (#1309) [e2e81c8]
+- Avoid allocations with `(*regexp.Regexp).MatchString` (#1302) [3b2a2a7]
+
+## 2.13.1
+
+### Fixes
+- # 1296 fix(precompiled test guite): exec bit check omitted on Windows (#1301) [26eea01]
+
+### Maintenance
+- Bump github.com/go-logr/logr from 1.2.4 to 1.3.0 (#1291) [7161a9d]
+- Bump golang.org/x/sys from 0.13.0 to 0.14.0 (#1295) [7fc7b10]
+- Bump golang.org/x/tools from 0.12.0 to 0.14.0 (#1282) [74bbd65]
+- Bump github.com/onsi/gomega from 1.27.10 to 1.29.0 (#1290) [9373633]
+- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1286) [6e3cf65]
+
## 2.13.0
### Features
@@ -412,7 +804,7 @@ Ginkgo also uses this progress reporting infrastructure under the hood when hand
### Features
- `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes.
- As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.
+ As a result the **signature of these methods has changed** and now includes a trailing `args ...any`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.
### Maintenance
- Modernize the invocation of Ginkgo in github actions [0ffde58]
@@ -824,7 +1216,7 @@ New Features:
- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
- `ginkgo --failFast` aborts the test suite after the first failure.
- `ginkgo generate file_1 file_2` can take multiple file arguments.
-- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
+- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
Improvements:
@@ -858,7 +1250,7 @@ Bug Fixes:
Breaking changes:
- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
-- Modified the Reporter interface
+- Modified the Reporter interface
- `watch` is now a subcommand, not a flag.
DSL changes:
diff --git a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
index 1da92fe7ee..80de566a52 100644
--- a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
+++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
@@ -6,8 +6,10 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp
- Ensure adequate test coverage:
- When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
- When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
-- Make sure all the tests succeed via `ginkgo -r -p`
-- Vet your changes via `go vet ./...`
-- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes.
+- Run `make` or:
+ - Install ginkgo locally via `go install ./...`
+ - Make sure all the tests succeed via `ginkgo -r -p`
+ - Vet your changes via `go vet ./...`
+- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle && bundle exec jekyll serve` in the `docs` directory to preview your changes.
-Thanks for supporting Ginkgo!
\ No newline at end of file
+Thanks for supporting Ginkgo!
diff --git a/vendor/github.com/onsi/ginkgo/v2/Makefile b/vendor/github.com/onsi/ginkgo/v2/Makefile
new file mode 100644
index 0000000000..06dff97cdc
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/Makefile
@@ -0,0 +1,16 @@
+# default task since it's first
+.PHONY: all
+all: vet test
+
+.PHONY: test
+test:
+ go run github.com/onsi/ginkgo/v2/ginkgo -r -p -randomize-all -keep-going
+
+.PHONY: vet
+vet:
+ go vet ./...
+
+.PHONY: update-deps
+update-deps:
+ go get -u ./...
+ go mod tidy
\ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md
index cb23ffdf6a..7b7ab9e39c 100644
--- a/vendor/github.com/onsi/ginkgo/v2/README.md
+++ b/vendor/github.com/onsi/ginkgo/v2/README.md
@@ -1,6 +1,6 @@

-[](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/)
+[](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/)
---
@@ -113,3 +113,13 @@ Ginkgo is MIT-Licensed
## Contributing
See [CONTRIBUTING.md](CONTRIBUTING.md)
+
+## Sponsors
+
+Sponsors commit to a [sponsorship](https://github.com/sponsors/onsi) for a year. If you're an organization that makes use of Ginkgo please consider becoming a sponsor!
+
+Browser testing via
+
+
+
+
diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
index 2d7a70eccb..7e165e4738 100644
--- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
@@ -83,9 +83,9 @@ func exitIfErrors(errors []error) {
type GinkgoWriterInterface interface {
io.Writer
- Print(a ...interface{})
- Printf(format string, a ...interface{})
- Println(a ...interface{})
+ Print(a ...any)
+ Printf(format string, a ...any)
+ Println(a ...any)
TeeTo(writer io.Writer)
ClearTeeWriters()
@@ -186,6 +186,20 @@ func GinkgoLabelFilter() string {
return suiteConfig.LabelFilter
}
+/*
+GinkgoSemVerFilter() returns the semantic version filter configured for this suite via `--sem-ver-filter`.
+
+You can use this to manually check if a set of semantic version constraints would satisfy the filter via:
+
+ if (SemVerConstraint("> 2.6.0", "< 2.8.0").MatchesSemVerFilter(GinkgoSemVerFilter())) {
+ //...
+ }
+*/
+func GinkgoSemVerFilter() string {
+ suiteConfig, _ := GinkgoConfiguration()
+ return suiteConfig.SemVerFilter
+}
+
/*
PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
when running in parallel and output to stdout/stderr is being intercepted. You generally
@@ -243,7 +257,7 @@ for more on how specs are parallelized in Ginkgo.
You can also pass suite-level Label() decorators to RunSpecs. The passed-in labels will apply to all specs in the suite.
*/
-func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
+func RunSpecs(t GinkgoTestingT, description string, args ...any) bool {
if suiteDidRun {
exitIfErr(types.GinkgoErrors.RerunningSuite())
}
@@ -254,7 +268,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
}
defer global.PopClone()
- suiteLabels := extractSuiteConfiguration(args)
+ suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args)
var reporter reporters.Reporter
if suiteConfig.ParallelTotal == 1 {
@@ -292,12 +306,12 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
err = global.Suite.BuildTree()
exitIfErr(err)
- suitePath, err := os.Getwd()
+ suitePath, err := getwd()
exitIfErr(err)
suitePath, err = filepath.Abs(suitePath)
exitIfErr(err)
- passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
+ passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
outputInterceptor.Shutdown()
flagSet.ValidateDeprecations(deprecationTracker)
@@ -316,8 +330,10 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
return passed
}
-func extractSuiteConfiguration(args []interface{}) Labels {
+func extractSuiteConfiguration(args []any) (Labels, SemVerConstraints, types.AroundNodes) {
suiteLabels := Labels{}
+ suiteSemVerConstraints := SemVerConstraints{}
+ aroundNodes := types.AroundNodes{}
configErrors := []error{}
for _, arg := range args {
switch arg := arg.(type) {
@@ -327,6 +343,10 @@ func extractSuiteConfiguration(args []interface{}) Labels {
reporterConfig = arg
case Labels:
suiteLabels = append(suiteLabels, arg...)
+ case SemVerConstraints:
+ suiteSemVerConstraints = append(suiteSemVerConstraints, arg...)
+ case types.AroundNodeDecorator:
+ aroundNodes = append(aroundNodes, arg)
default:
configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg))
}
@@ -342,7 +362,16 @@ func extractSuiteConfiguration(args []interface{}) Labels {
os.Exit(1)
}
- return suiteLabels
+ return suiteLabels, suiteSemVerConstraints, aroundNodes
+}
+
+func getwd() (string, error) {
+ if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") {
+ // Getwd calls os.Getenv("PWD"), which breaks test caching if the cache
+ // is shared between two different directories with the same test code.
+ return os.Getwd()
+ }
+ return "", nil
}
/*
@@ -356,7 +385,7 @@ func PreviewSpecs(description string, args ...any) Report {
}
defer global.PopClone()
- suiteLabels := extractSuiteConfiguration(args)
+ suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args)
priorDryRun, priorParallelTotal, priorParallelProcess := suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess
suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = true, 1, 1
defer func() {
@@ -369,12 +398,12 @@ func PreviewSpecs(description string, args ...any) Report {
err = global.Suite.BuildTree()
exitIfErr(err)
- suitePath, err := os.Getwd()
+ suitePath, err := getwd()
exitIfErr(err)
suitePath, err = filepath.Abs(suitePath)
exitIfErr(err)
- global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
+ global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
return global.Suite.GetPreviewReport()
}
@@ -472,6 +501,38 @@ func pushNode(node internal.Node, errors []error) bool {
return true
}
+// NodeArgsTransformer is a hook which is called by the test construction DSL methods
+// before creating the new node. If it returns any error, the test suite
+// prints those errors and exits. The text and arguments can be modified,
+// which includes directly changing the args slice that is passed in.
+// Arguments have been flattened already, i.e. none of the entries in args is another []any.
+// The result may be nested.
+//
+// The node type is provided for information and remains the same.
+//
+// The offset is valid for calling NewLocation directly in the
+// implementation of TransformNodeArgs to find the location where
+// the Ginkgo DSL function is called. An additional offset supplied
+// by the caller via args is already included.
+//
+// A NodeArgsTransformer can be registered with AddTreeConstructionNodeArgsTransformer.
+type NodeArgsTransformer func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error)
+
+// AddTreeConstructionNodeArgsTransformer registers a NodeArgsTransformer.
+// Only nodes which get created after registering a NodeArgsTransformer
+// are transformed by it. The returned function can be called to
+// unregister the transformer.
+//
+// Both may only be called during the construction phase.
+//
+// If there is more than one registered transformer, then the most
+// recently added ones get called first.
+func AddTreeConstructionNodeArgsTransformer(transformer NodeArgsTransformer) func() {
+ // This conversion could be avoided with a type alias, but type aliases make
+ // developer documentation less useful.
+ return internal.AddTreeConstructionNodeArgsTransformer(internal.NodeArgsTransformer(transformer))
+}
+
/*
Describe nodes are Container nodes that allow you to organize your specs. A Describe node's closure can contain any number of
Setup nodes (e.g. BeforeEach, AfterEach, JustBeforeEach), and Subject nodes (i.e. It).
@@ -482,24 +543,24 @@ to Describe the behavior of an object or function and, within that Describe, out
You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes
In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
*/
-func Describe(text string, args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+func Describe(text string, args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...)))
}
/*
FDescribe focuses specs within the Describe block.
*/
-func FDescribe(text string, args ...interface{}) bool {
+func FDescribe(text string, args ...any) bool {
args = append(args, internal.Focus)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...)))
}
/*
PDescribe marks specs within the Describe block as pending.
*/
-func PDescribe(text string, args ...interface{}) bool {
+func PDescribe(text string, args ...any) bool {
args = append(args, internal.Pending)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...)))
}
/*
@@ -512,21 +573,21 @@ var XDescribe = PDescribe
/* Context is an alias for Describe - it generates the exact same kind of Container node */
var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe
-/* When is an alias for Describe - it generates the exact same kind of Container node */
-func When(text string, args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+/* When is an alias for Describe - it generates the exact same kind of Container node with "when " as prefix for the text. */
+func When(text string, args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...)))
}
-/* When is an alias for Describe - it generates the exact same kind of Container node */
-func FWhen(text string, args ...interface{}) bool {
+/* When is an alias for Describe - it generates the exact same kind of Container node with "when " as prefix for the text. */
+func FWhen(text string, args ...any) bool {
args = append(args, internal.Focus)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...)))
}
/* When is an alias for Describe - it generates the exact same kind of Container node */
-func PWhen(text string, args ...interface{}) bool {
+func PWhen(text string, args ...any) bool {
args = append(args, internal.Pending)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...)))
}
var XWhen = PWhen
@@ -541,24 +602,24 @@ You can pass It nodes bare functions (func() {}) or functions that receive a Spe
You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it
In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
*/
-func It(text string, args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+func It(text string, args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...)))
}
/*
FIt allows you to focus an individual It.
*/
-func FIt(text string, args ...interface{}) bool {
+func FIt(text string, args ...any) bool {
args = append(args, internal.Focus)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...)))
}
/*
PIt allows you to mark an individual It as pending.
*/
-func PIt(text string, args ...interface{}) bool {
+func PIt(text string, args ...any) bool {
args = append(args, internal.Pending)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...)))
}
/*
@@ -602,10 +663,10 @@ BeforeSuite can take a func() body, or an interruptible func(SpecContext)/func(c
You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
*/
-func BeforeSuite(body interface{}, args ...interface{}) bool {
- combinedArgs := []interface{}{body}
+func BeforeSuite(body any, args ...any) bool {
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...)))
}
/*
@@ -621,10 +682,10 @@ AfterSuite can take a func() body, or an interruptible func(SpecContext)/func(co
You cannot nest any other Ginkgo nodes within an AfterSuite node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
*/
-func AfterSuite(body interface{}, args ...interface{}) bool {
- combinedArgs := []interface{}{body}
+func AfterSuite(body any, args ...any) bool {
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...)))
}
/*
@@ -658,11 +719,11 @@ If either function receives a context.Context/SpecContext it is considered inter
You cannot nest any other Ginkgo nodes within an SynchronizedBeforeSuite node's closure.
You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
*/
-func SynchronizedBeforeSuite(process1Body interface{}, allProcessBody interface{}, args ...interface{}) bool {
- combinedArgs := []interface{}{process1Body, allProcessBody}
+func SynchronizedBeforeSuite(process1Body any, allProcessBody any, args ...any) bool {
+ combinedArgs := []any{process1Body, allProcessBody}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...)))
}
/*
@@ -678,11 +739,11 @@ Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accompli
You cannot nest any other Ginkgo nodes within an SynchronizedAfterSuite node's closure.
You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
*/
-func SynchronizedAfterSuite(allProcessBody interface{}, process1Body interface{}, args ...interface{}) bool {
- combinedArgs := []interface{}{allProcessBody, process1Body}
+func SynchronizedAfterSuite(allProcessBody any, process1Body any, args ...any) bool {
+ combinedArgs := []any{allProcessBody, process1Body}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...)))
}
/*
@@ -694,8 +755,8 @@ BeforeEach can take a func() body, or an interruptible func(SpecContext)/func(co
You cannot nest any other Ginkgo nodes within a BeforeEach node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach
*/
-func BeforeEach(args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...))
+func BeforeEach(args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeEach, "", args...)))
}
/*
@@ -707,8 +768,8 @@ JustBeforeEach can take a func() body, or an interruptible func(SpecContext)/fun
You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure.
You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach
*/
-func JustBeforeEach(args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...))
+func JustBeforeEach(args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeJustBeforeEach, "", args...)))
}
/*
@@ -722,8 +783,8 @@ AfterEach can take a func() body, or an interruptible func(SpecContext)/func(con
You cannot nest any other Ginkgo nodes within an AfterEach node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup
*/
-func AfterEach(args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...))
+func AfterEach(args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterEach, "", args...)))
}
/*
@@ -734,8 +795,8 @@ JustAfterEach can take a func() body, or an interruptible func(SpecContext)/func
You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure.
You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach
*/
-func JustAfterEach(args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...))
+func JustAfterEach(args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeJustAfterEach, "", args...)))
}
/*
@@ -749,8 +810,8 @@ You cannot nest any other Ginkgo nodes within a BeforeAll node's closure.
You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
*/
-func BeforeAll(args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...))
+func BeforeAll(args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeAll, "", args...)))
}
/*
@@ -766,8 +827,8 @@ You cannot nest any other Ginkgo nodes within an AfterAll node's closure.
You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
*/
-func AfterAll(args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...))
+func AfterAll(args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterAll, "", args...)))
}
/*
@@ -783,8 +844,8 @@ DeferCleanup can be passed:
For example:
BeforeEach(func() {
- DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO"))
- os.SetEnv("FOO", "BAR")
+ DeferCleanup(os.Setenv, "FOO", os.GetEnv("FOO"))
+ os.Setenv("FOO", "BAR")
})
will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec.
@@ -809,7 +870,7 @@ When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite,
Note that DeferCleanup does not represent a node but rather dynamically generates the appropriate type of cleanup node based on the context in which it is called. As such you must call DeferCleanup within a Setup or Subject node, and not within a Container node.
You can learn more about DeferCleanup here: https://onsi.github.io/ginkgo/#cleaning-up-our-cleanup-code-defercleanup
*/
-func DeferCleanup(args ...interface{}) {
+func DeferCleanup(args ...any) {
fail := func(message string, cl types.CodeLocation) {
global.Failer.Fail(message, cl)
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
index c65af4ce1c..e331d7cf8c 100644
--- a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
@@ -2,6 +2,7 @@ package ginkgo
import (
"github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/types"
)
/*
@@ -99,6 +100,23 @@ You can learn more here: https://onsi.github.io/ginkgo/#spec-labels
*/
type Labels = internal.Labels
+/*
+SemVerConstraint decorates specs with SemVerConstraints. Multiple semantic version constraints can be passed to SemVerConstraint and these strings must follow the semantic version constraint rules.
+SemVerConstraints can be applied to container and subject nodes, but not setup nodes. You can provide multiple SemVerConstraints to a given node and a spec's semantic version constraints is the union of all semantic version constraints in its node hierarchy.
+
+You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+func SemVerConstraint(semVerConstraints ...string) SemVerConstraints {
+ return SemVerConstraints(semVerConstraints)
+}
+
+/*
+SemVerConstraints are the type for spec SemVerConstraint decorators. Use SemVerConstraint(...) to construct SemVerConstraints.
+You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering
+*/
+type SemVerConstraints = internal.SemVerConstraints
+
/*
PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node.
@@ -136,8 +154,40 @@ Nodes that do not finish within a GracePeriod will be leaked and Ginkgo will pro
*/
type GracePeriod = internal.GracePeriod
+/*
+SpecPriority allows you to assign a priority to a spec or container.
+
+Specs with higher priority will be scheduled to run before specs with lower priority. The default priority is 0 and negative priorities are allowed.
+*/
+type SpecPriority = internal.SpecPriority
+
/*
SuppressProgressReporting is a decorator that allows you to disable progress reporting of a particular node. This is useful if `ginkgo -v -progress` is generating too much noise; particularly
if you have a `ReportAfterEach` node that is running for every skipped spec and is generating lots of progress reports.
*/
const SuppressProgressReporting = internal.SuppressProgressReporting
+
+/*
+AroundNode registers a function that runs before each individual node. This is considered a more advanced decorator.
+
+Please read the [docs](https://onsi.github.io/ginkgo/#advanced-around-node) for more information.
+
+Allowed signatures:
+
+- AroundNode(func()) - func will be called before the node is run.
+- AroundNode(func(ctx context.Context) context.Context) - func can wrap the passed in context and return a new one which will be passed on to the node.
+- AroundNode(func(ctx context.Context, body func(ctx context.Context))) - ctx is the context for the node and body is a function that must be called to run the node. This gives you complete control over what runs before and after the node.
+
+Multiple AroundNode decorators can be applied to a single node and they will run in the order they are applied.
+
+Unlike setup nodes like BeforeEach and DeferCleanup, AroundNode is guaranteed to run in the same goroutine as the decorated node. This is necessary when working with lower-level libraries that must run on a single thread (you can call runtime.LockOSThread() in the AroundNode to ensure that the node runs on a single thread).
+
+Since AroundNode allows you to modify the context you can also use AroundNode to implement shared setup that attaches values to the context. You must return a context that inherits from the passed in context.
+
+If applied to a container, AroundNode will run before every node in the container. Including setup nodes like BeforeEach and DeferCleanup.
+
+AroundNode can also be applied to RunSpecs to run before every node in the suite.
+*/
+func AroundNode[F types.AroundNodeAllowedFuncs](f F) types.AroundNodeDecorator {
+ return types.AroundNode(f, types.NewCodeLocation(1))
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
index f912bbec65..fd45b8beab 100644
--- a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
@@ -118,9 +118,9 @@ Use Gomega's gmeasure package instead.
You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
*/
type Benchmarker interface {
- Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
- RecordValue(name string, value float64, info ...interface{})
- RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
+ Time(name string, body func(), info ...any) (elapsedTime time.Duration)
+ RecordValue(name string, value float64, info ...any)
+ RecordValueWithPrecision(name string, value float64, units string, precision int, info ...any)
}
/*
@@ -129,7 +129,7 @@ Deprecated: Measure() has been removed from Ginkgo 2.0
Use Gomega's gmeasure package instead.
You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
*/
-func Measure(_ ...interface{}) bool {
+func Measure(_ ...any) bool {
deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), types.NewCodeLocation(1))
return true
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
index 743555ddea..f61356db19 100644
--- a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
+++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
@@ -24,15 +24,15 @@ const (
var SingletonFormatter = New(ColorModeTerminal)
-func F(format string, args ...interface{}) string {
+func F(format string, args ...any) string {
return SingletonFormatter.F(format, args...)
}
-func Fi(indentation uint, format string, args ...interface{}) string {
+func Fi(indentation uint, format string, args ...any) string {
return SingletonFormatter.Fi(indentation, format, args...)
}
-func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
+func Fiw(indentation uint, maxWidth uint, format string, args ...any) string {
return SingletonFormatter.Fiw(indentation, maxWidth, format, args...)
}
@@ -82,6 +82,10 @@ func New(colorMode ColorMode) Formatter {
return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
}
+ if _, noColor := os.LookupEnv("GINKGO_NO_COLOR"); noColor {
+ colorMode = ColorModeNone
+ }
+
f := Formatter{
ColorMode: colorMode,
colors: map[string]string{
@@ -111,15 +115,15 @@ func New(colorMode ColorMode) Formatter {
return f
}
-func (f Formatter) F(format string, args ...interface{}) string {
+func (f Formatter) F(format string, args ...any) string {
return f.Fi(0, format, args...)
}
-func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string {
+func (f Formatter) Fi(indentation uint, format string, args ...any) string {
return f.Fiw(indentation, 0, format, args...)
}
-func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
+func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...any) string {
out := f.style(format)
if len(args) > 0 {
out = fmt.Sprintf(out, args...)
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go
new file mode 100644
index 0000000000..ee6ac7b5f3
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go
@@ -0,0 +1,8 @@
+//go:build !go1.25
+// +build !go1.25
+
+package main
+
+import (
+ _ "github.com/onsi/ginkgo/v2/ginkgo/automaxprocs"
+)
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md
new file mode 100644
index 0000000000..e249ebe8b3
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md
@@ -0,0 +1,3 @@
+This entire directory is a lightly modified clone of https://github.com/uber-go/automaxprocs
+
+It will be removed when Go 1.26 ships and we no longer need to support Go 1.24 (which does not correctly autodetect maxprocs in containers).
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go
new file mode 100644
index 0000000000..8a762b51d6
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package automaxprocs lets Go programs easily configure runtime.GOMAXPROCS
+// to match the configured Linux CPU quota. It is a lightly modified clone of
+// go.uber.org/automaxprocs, trimmed to set GOMAXPROCS once at init time.
+package automaxprocs
+
+import (
+ "os"
+ "runtime"
+)
+
+func init() {
+ Set()
+}
+
+const _maxProcsKey = "GOMAXPROCS"
+
+type config struct {
+ procs func(int, func(v float64) int) (int, CPUQuotaStatus, error)
+ minGOMAXPROCS int
+ roundQuotaFunc func(v float64) int
+}
+
+// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning
+// any error encountered.
+//
+// Set is a no-op on non-Linux systems and in Linux environments without a
+// configured CPU quota.
+func Set() error {
+ cfg := &config{
+ procs: CPUQuotaToGOMAXPROCS,
+ roundQuotaFunc: DefaultRoundFunc,
+ minGOMAXPROCS: 1,
+ }
+
+ // Honor the GOMAXPROCS environment variable if present. Otherwise, amend
+ // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is
+ // Linux, and guarantee a minimum value of 1 (cfg.minGOMAXPROCS). Unlike
+ // upstream automaxprocs, this clone exposes no override such as maxprocs.Min().
+ if _, exists := os.LookupEnv(_maxProcsKey); exists {
+ return nil
+ }
+ maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc)
+ if err != nil {
+ return err
+ }
+ if status == CPUQuotaUndefined {
+ return nil
+ }
+ runtime.GOMAXPROCS(maxProcs)
+ return nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go
new file mode 100644
index 0000000000..a4676933e8
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package automaxprocs
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "path/filepath"
+ "strconv"
+)
+
+// CGroup represents the data structure for a Linux control group.
+type CGroup struct {
+ path string
+}
+
+// NewCGroup returns a new *CGroup from a given path.
+func NewCGroup(path string) *CGroup {
+ return &CGroup{path: path}
+}
+
+// Path returns the path of the *CGroup.
+func (cg *CGroup) Path() string {
+ return cg.path
+}
+
+// ParamPath returns the path of the given cgroup param under itself.
+func (cg *CGroup) ParamPath(param string) string {
+ return filepath.Join(cg.path, param)
+}
+
+// readFirstLine reads the first line from a cgroup param file.
+func (cg *CGroup) readFirstLine(param string) (string, error) {
+ paramFile, err := os.Open(cg.ParamPath(param))
+ if err != nil {
+ return "", err
+ }
+ defer paramFile.Close()
+
+ scanner := bufio.NewScanner(paramFile)
+ if scanner.Scan() {
+ return scanner.Text(), nil
+ }
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+ return "", io.ErrUnexpectedEOF
+}
+
+// readInt parses the first line from a cgroup param file as int.
+func (cg *CGroup) readInt(param string) (int, error) {
+ text, err := cg.readFirstLine(param)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.Atoi(text)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go
new file mode 100644
index 0000000000..ed384891ef
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package automaxprocs
+
+const (
+ // _cgroupFSType is the Linux CGroup file system type used in
+ // `/proc/$PID/mountinfo`.
+ _cgroupFSType = "cgroup"
+ // _cgroupSubsysCPU is the CPU CGroup subsystem.
+ _cgroupSubsysCPU = "cpu"
+ // _cgroupSubsysCPUAcct is the CPU accounting CGroup subsystem.
+ _cgroupSubsysCPUAcct = "cpuacct"
+ // _cgroupSubsysCPUSet is the CPUSet CGroup subsystem.
+ _cgroupSubsysCPUSet = "cpuset"
+ // _cgroupSubsysMemory is the Memory CGroup subsystem.
+ _cgroupSubsysMemory = "memory"
+
+ // _cgroupCPUCFSQuotaUsParam is the file name for the CGroup CFS quota
+ // parameter.
+ _cgroupCPUCFSQuotaUsParam = "cpu.cfs_quota_us"
+ // _cgroupCPUCFSPeriodUsParam is the file name for the CGroup CFS period
+ // parameter.
+ _cgroupCPUCFSPeriodUsParam = "cpu.cfs_period_us"
+)
+
+const (
+ _procPathCGroup = "/proc/self/cgroup"
+ _procPathMountInfo = "/proc/self/mountinfo"
+)
+
+// CGroups is a map that associates each CGroup with its subsystem name.
+type CGroups map[string]*CGroup
+
+// NewCGroups returns a new *CGroups from given `mountinfo` and `cgroup` files
+// of some process under the `/proc` file system (see also proc(5) for more
+// information).
+func NewCGroups(procPathMountInfo, procPathCGroup string) (CGroups, error) {
+ cgroupSubsystems, err := parseCGroupSubsystems(procPathCGroup)
+ if err != nil {
+ return nil, err
+ }
+
+ cgroups := make(CGroups)
+ newMountPoint := func(mp *MountPoint) error {
+ if mp.FSType != _cgroupFSType {
+ return nil
+ }
+
+ for _, opt := range mp.SuperOptions {
+ subsys, exists := cgroupSubsystems[opt]
+ if !exists {
+ continue
+ }
+
+ cgroupPath, err := mp.Translate(subsys.Name)
+ if err != nil {
+ return err
+ }
+ cgroups[opt] = NewCGroup(cgroupPath)
+ }
+
+ return nil
+ }
+
+ if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil {
+ return nil, err
+ }
+ return cgroups, nil
+}
+
+// NewCGroupsForCurrentProcess returns a new *CGroups instance for the current
+// process.
+func NewCGroupsForCurrentProcess() (CGroups, error) {
+ return NewCGroups(_procPathMountInfo, _procPathCGroup)
+}
+
+// CPUQuota returns the CPU quota applied with the CPU cgroup controller.
+// It is a result of `cpu.cfs_quota_us / cpu.cfs_period_us`. If the value of
+// `cpu.cfs_quota_us` was not set (-1), the method returns `(-1, false, nil)`.
+func (cg CGroups) CPUQuota() (float64, bool, error) {
+ cpuCGroup, exists := cg[_cgroupSubsysCPU]
+ if !exists {
+ return -1, false, nil
+ }
+
+ cfsQuotaUs, err := cpuCGroup.readInt(_cgroupCPUCFSQuotaUsParam)
+ if defined := cfsQuotaUs > 0; err != nil || !defined {
+ return -1, defined, err
+ }
+
+ cfsPeriodUs, err := cpuCGroup.readInt(_cgroupCPUCFSPeriodUsParam)
+ if defined := cfsPeriodUs > 0; err != nil || !defined {
+ return -1, defined, err
+ }
+
+ return float64(cfsQuotaUs) / float64(cfsPeriodUs), true, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go
new file mode 100644
index 0000000000..69a0be6b71
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go
@@ -0,0 +1,176 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package automaxprocs
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+)
+
+const (
+ // _cgroupv2CPUMax is the file name for the CGroup-V2 CPU max and period
+ // parameter.
+ _cgroupv2CPUMax = "cpu.max"
+ // _cgroupv2FSType is the Linux CGroup-V2 file system type used in
+ // `/proc/$PID/mountinfo`.
+ _cgroupv2FSType = "cgroup2"
+
+ _cgroupv2MountPoint = "/sys/fs/cgroup"
+
+ _cgroupV2CPUMaxDefaultPeriod = 100000
+ _cgroupV2CPUMaxQuotaMax = "max"
+)
+
+const (
+ _cgroupv2CPUMaxQuotaIndex = iota
+ _cgroupv2CPUMaxPeriodIndex
+)
+
+// ErrNotV2 indicates that the system is not using cgroups2.
+var ErrNotV2 = errors.New("not using cgroups2")
+
+// CGroups2 provides access to cgroups data for systems using cgroups2.
+type CGroups2 struct {
+ mountPoint string
+ groupPath string
+ cpuMaxFile string
+}
+
+// NewCGroups2ForCurrentProcess builds a CGroups2 for the current process.
+//
+// This returns ErrNotV2 if the system is not using cgroups2.
+func NewCGroups2ForCurrentProcess() (*CGroups2, error) {
+ return newCGroups2From(_procPathMountInfo, _procPathCGroup)
+}
+
+func newCGroups2From(mountInfoPath, procPathCGroup string) (*CGroups2, error) {
+ isV2, err := isCGroupV2(mountInfoPath)
+ if err != nil {
+ return nil, err
+ }
+
+ if !isV2 {
+ return nil, ErrNotV2
+ }
+
+ subsystems, err := parseCGroupSubsystems(procPathCGroup)
+ if err != nil {
+ return nil, err
+ }
+
+ // Find v2 subsystem by looking for the `0` id
+ var v2subsys *CGroupSubsys
+ for _, subsys := range subsystems {
+ if subsys.ID == 0 {
+ v2subsys = subsys
+ break
+ }
+ }
+
+ if v2subsys == nil {
+ return nil, ErrNotV2
+ }
+
+ return &CGroups2{
+ mountPoint: _cgroupv2MountPoint,
+ groupPath: v2subsys.Name,
+ cpuMaxFile: _cgroupv2CPUMax,
+ }, nil
+}
+
+func isCGroupV2(procPathMountInfo string) (bool, error) {
+ var (
+ isV2 bool
+ newMountPoint = func(mp *MountPoint) error {
+ isV2 = isV2 || (mp.FSType == _cgroupv2FSType && mp.MountPoint == _cgroupv2MountPoint)
+ return nil
+ }
+ )
+
+ if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil {
+ return false, err
+ }
+
+ return isV2, nil
+}
+
+// CPUQuota returns the CPU quota applied with the CPU cgroup2 controller.
+// It is a result of reading cpu quota and period from cpu.max file.
+// It will return `quota / period` as read from cpu.max. If the quota is "max", it returns
+// (-1, false, nil)
+func (cg *CGroups2) CPUQuota() (float64, bool, error) {
+ cpuMaxParams, err := os.Open(path.Join(cg.mountPoint, cg.groupPath, cg.cpuMaxFile))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return -1, false, nil
+ }
+ return -1, false, err
+ }
+ defer cpuMaxParams.Close()
+
+ scanner := bufio.NewScanner(cpuMaxParams)
+ if scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ if len(fields) == 0 || len(fields) > 2 {
+ return -1, false, fmt.Errorf("invalid format")
+ }
+
+ if fields[_cgroupv2CPUMaxQuotaIndex] == _cgroupV2CPUMaxQuotaMax {
+ return -1, false, nil
+ }
+
+ max, err := strconv.Atoi(fields[_cgroupv2CPUMaxQuotaIndex])
+ if err != nil {
+ return -1, false, err
+ }
+
+ var period int
+ if len(fields) == 1 {
+ period = _cgroupV2CPUMaxDefaultPeriod
+ } else {
+ period, err = strconv.Atoi(fields[_cgroupv2CPUMaxPeriodIndex])
+ if err != nil {
+ return -1, false, err
+ }
+
+ if period == 0 {
+ return -1, false, errors.New("zero value for period is not allowed")
+ }
+ }
+
+ return float64(max) / float64(period), true, nil
+ }
+
+ if err := scanner.Err(); err != nil {
+ return -1, false, err
+ }
+
+ return 0, false, io.ErrUnexpectedEOF
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go
new file mode 100644
index 0000000000..2d83343bd9
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package automaxprocs
+
+import (
+ "errors"
+)
+
+// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
+// to a valid GOMAXPROCS value. The quota is converted from float to int using round.
+// If round == nil, DefaultRoundFunc is used.
+func CPUQuotaToGOMAXPROCS(minValue int, round func(v float64) int) (int, CPUQuotaStatus, error) {
+ if round == nil {
+ round = DefaultRoundFunc
+ }
+ cgroups, err := _newQueryer()
+ if err != nil {
+ return -1, CPUQuotaUndefined, err
+ }
+
+ quota, defined, err := cgroups.CPUQuota()
+ if !defined || err != nil {
+ return -1, CPUQuotaUndefined, err
+ }
+
+ maxProcs := round(quota)
+ if minValue > 0 && maxProcs < minValue {
+ return minValue, CPUQuotaMinUsed, nil
+ }
+ return maxProcs, CPUQuotaUsed, nil
+}
+
+type queryer interface {
+ CPUQuota() (float64, bool, error)
+}
+
+var (
+ _newCgroups2 = NewCGroups2ForCurrentProcess
+ _newCgroups = NewCGroupsForCurrentProcess
+ _newQueryer = newQueryer
+)
+
+func newQueryer() (queryer, error) {
+ cgroups, err := _newCgroups2()
+ if err == nil {
+ return cgroups, nil
+ }
+ if errors.Is(err, ErrNotV2) {
+ return _newCgroups()
+ }
+ return nil, err
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go
new file mode 100644
index 0000000000..d2d61e8941
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build !linux
+// +build !linux
+
+package automaxprocs
+
+// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
+// to a valid GOMAXPROCS value. This is Linux-specific and not supported in the
+// current OS.
+func CPUQuotaToGOMAXPROCS(_ int, _ func(v float64) int) (int, CPUQuotaStatus, error) {
+ return -1, CPUQuotaUndefined, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go
new file mode 100644
index 0000000000..2e235d7d65
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go
@@ -0,0 +1,52 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package automaxprocs
+
+import "fmt"
+
+type cgroupSubsysFormatInvalidError struct {
+ line string
+}
+
+type mountPointFormatInvalidError struct {
+ line string
+}
+
+type pathNotExposedFromMountPointError struct {
+ mountPoint string
+ root string
+ path string
+}
+
+func (err cgroupSubsysFormatInvalidError) Error() string {
+ return fmt.Sprintf("invalid format for CGroupSubsys: %q", err.line)
+}
+
+func (err mountPointFormatInvalidError) Error() string {
+ return fmt.Sprintf("invalid format for MountPoint: %q", err.line)
+}
+
+func (err pathNotExposedFromMountPointError) Error() string {
+ return fmt.Sprintf("path %q is not a descendant of mount point root %q and cannot be exposed from %q", err.path, err.root, err.mountPoint)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go
new file mode 100644
index 0000000000..7c3fa306ef
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package automaxprocs
+
+import (
+ "bufio"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+const (
+ _mountInfoSep = " "
+ _mountInfoOptsSep = ","
+ _mountInfoOptionalFieldsSep = "-"
+)
+
+const (
+ _miFieldIDMountID = iota
+ _miFieldIDParentID
+ _miFieldIDDeviceID
+ _miFieldIDRoot
+ _miFieldIDMountPoint
+ _miFieldIDOptions
+ _miFieldIDOptionalFields
+
+ _miFieldCountFirstHalf
+)
+
+const (
+ _miFieldOffsetFSType = iota
+ _miFieldOffsetMountSource
+ _miFieldOffsetSuperOptions
+
+ _miFieldCountSecondHalf
+)
+
+const _miFieldCountMin = _miFieldCountFirstHalf + _miFieldCountSecondHalf
+
+// MountPoint is the data structure for the mount points in
+// `/proc/$PID/mountinfo`. See also proc(5) for more information.
+type MountPoint struct {
+ MountID int
+ ParentID int
+ DeviceID string
+ Root string
+ MountPoint string
+ Options []string
+ OptionalFields []string
+ FSType string
+ MountSource string
+ SuperOptions []string
+}
+
+// NewMountPointFromLine parses a line read from `/proc/$PID/mountinfo` and
+// returns a new *MountPoint.
+func NewMountPointFromLine(line string) (*MountPoint, error) {
+ fields := strings.Split(line, _mountInfoSep)
+
+ if len(fields) < _miFieldCountMin {
+ return nil, mountPointFormatInvalidError{line}
+ }
+
+ mountID, err := strconv.Atoi(fields[_miFieldIDMountID])
+ if err != nil {
+ return nil, err
+ }
+
+ parentID, err := strconv.Atoi(fields[_miFieldIDParentID])
+ if err != nil {
+ return nil, err
+ }
+
+ for i, field := range fields[_miFieldIDOptionalFields:] {
+ if field == _mountInfoOptionalFieldsSep {
+ // End of optional fields.
+ fsTypeStart := _miFieldIDOptionalFields + i + 1
+
+ // Now we know where the optional fields end, split the line again with a
+ // limit to avoid issues with spaces in super options as present on WSL.
+ fields = strings.SplitN(line, _mountInfoSep, fsTypeStart+_miFieldCountSecondHalf)
+ if len(fields) != fsTypeStart+_miFieldCountSecondHalf {
+ return nil, mountPointFormatInvalidError{line}
+ }
+
+ miFieldIDFSType := _miFieldOffsetFSType + fsTypeStart
+ miFieldIDMountSource := _miFieldOffsetMountSource + fsTypeStart
+ miFieldIDSuperOptions := _miFieldOffsetSuperOptions + fsTypeStart
+
+ return &MountPoint{
+ MountID: mountID,
+ ParentID: parentID,
+ DeviceID: fields[_miFieldIDDeviceID],
+ Root: fields[_miFieldIDRoot],
+ MountPoint: fields[_miFieldIDMountPoint],
+ Options: strings.Split(fields[_miFieldIDOptions], _mountInfoOptsSep),
+ OptionalFields: fields[_miFieldIDOptionalFields:(fsTypeStart - 1)],
+ FSType: fields[miFieldIDFSType],
+ MountSource: fields[miFieldIDMountSource],
+ SuperOptions: strings.Split(fields[miFieldIDSuperOptions], _mountInfoOptsSep),
+ }, nil
+ }
+ }
+
+ return nil, mountPointFormatInvalidError{line}
+}
+
+// Translate converts an absolute path inside the *MountPoint's file system to
+// the host file system path in the mount namespace the *MountPoint belongs to.
+func (mp *MountPoint) Translate(absPath string) (string, error) {
+ relPath, err := filepath.Rel(mp.Root, absPath)
+
+ if err != nil {
+ return "", err
+ }
+ if relPath == ".." || strings.HasPrefix(relPath, "../") {
+ return "", pathNotExposedFromMountPointError{
+ mountPoint: mp.MountPoint,
+ root: mp.Root,
+ path: absPath,
+ }
+ }
+
+ return filepath.Join(mp.MountPoint, relPath), nil
+}
+
+// parseMountInfo parses procPathMountInfo (usually at `/proc/$PID/mountinfo`)
+// and yields parsed *MountPoint into newMountPoint.
+func parseMountInfo(procPathMountInfo string, newMountPoint func(*MountPoint) error) error {
+ mountInfoFile, err := os.Open(procPathMountInfo)
+ if err != nil {
+ return err
+ }
+ defer mountInfoFile.Close()
+
+ scanner := bufio.NewScanner(mountInfoFile)
+
+ for scanner.Scan() {
+ mountPoint, err := NewMountPointFromLine(scanner.Text())
+ if err != nil {
+ return err
+ }
+ if err := newMountPoint(mountPoint); err != nil {
+ return err
+ }
+ }
+
+ return scanner.Err()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go
new file mode 100644
index 0000000000..b8ec7e502a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package automaxprocs
+
+import "math"
+
+// CPUQuotaStatus presents the status of how CPU quota is used
+type CPUQuotaStatus int
+
+const (
+ // CPUQuotaUndefined is returned when CPU quota is undefined
+ CPUQuotaUndefined CPUQuotaStatus = iota
+ // CPUQuotaUsed is returned when a valid CPU quota can be used
+ CPUQuotaUsed
+ // CPUQuotaMinUsed is returned when CPU quota is smaller than the min value
+ CPUQuotaMinUsed
+)
+
+// DefaultRoundFunc is the default function to convert CPU quota from float to int. It rounds the value down (floor).
+func DefaultRoundFunc(v float64) int {
+ return int(math.Floor(v))
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go
new file mode 100644
index 0000000000..881ebd5902
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go
@@ -0,0 +1,103 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package automaxprocs
+
+import (
+ "bufio"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ _cgroupSep = ":"
+ _cgroupSubsysSep = ","
+)
+
+const (
+ _csFieldIDID = iota
+ _csFieldIDSubsystems
+ _csFieldIDName
+ _csFieldCount
+)
+
+// CGroupSubsys represents the data structure for entities in
+// `/proc/$PID/cgroup`. See also proc(5) for more information.
+type CGroupSubsys struct {
+ ID int
+ Subsystems []string
+ Name string
+}
+
+// NewCGroupSubsysFromLine returns a new *CGroupSubsys by parsing a string in
+// the format of `/proc/$PID/cgroup`
+func NewCGroupSubsysFromLine(line string) (*CGroupSubsys, error) {
+ fields := strings.SplitN(line, _cgroupSep, _csFieldCount)
+
+ if len(fields) != _csFieldCount {
+ return nil, cgroupSubsysFormatInvalidError{line}
+ }
+
+ id, err := strconv.Atoi(fields[_csFieldIDID])
+ if err != nil {
+ return nil, err
+ }
+
+ cgroup := &CGroupSubsys{
+ ID: id,
+ Subsystems: strings.Split(fields[_csFieldIDSubsystems], _cgroupSubsysSep),
+ Name: fields[_csFieldIDName],
+ }
+
+ return cgroup, nil
+}
+
+// parseCGroupSubsystems parses procPathCGroup (usually at `/proc/$PID/cgroup`)
+// and returns a new map[string]*CGroupSubsys.
+func parseCGroupSubsystems(procPathCGroup string) (map[string]*CGroupSubsys, error) {
+ cgroupFile, err := os.Open(procPathCGroup)
+ if err != nil {
+ return nil, err
+ }
+ defer cgroupFile.Close()
+
+ scanner := bufio.NewScanner(cgroupFile)
+ subsystems := make(map[string]*CGroupSubsys)
+
+ for scanner.Scan() {
+ cgroup, err := NewCGroupSubsysFromLine(scanner.Text())
+ if err != nil {
+ return nil, err
+ }
+ for _, subsys := range cgroup.Subsystems {
+ subsystems[subsys] = cgroup
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return subsystems, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
index 5db5d1a7bf..3021dfec2e 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
@@ -2,6 +2,8 @@ package build
import (
"fmt"
+ "os"
+ "path"
"github.com/onsi/ginkgo/v2/ginkgo/command"
"github.com/onsi/ginkgo/v2/ginkgo/internal"
@@ -27,7 +29,6 @@ func BuildBuildCommand() command.Command {
var errors []error
cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
-
buildSpecs(args, cliConfig, goFlagsConfig)
},
}
@@ -42,7 +43,7 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go
internal.VerifyCLIAndFrameworkVersion(suites)
opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers())
- opc.StartCompiling(suites, goFlagsConfig)
+ opc.StartCompiling(suites, goFlagsConfig, true)
for {
suiteIdx, suite := opc.Next()
@@ -53,7 +54,22 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go
if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
fmt.Println(suite.CompilationError.Error())
} else {
- fmt.Printf("Compiled %s.test\n", suite.PackageName)
+ var testBinPath string
+ if len(goFlagsConfig.O) != 0 {
+ stat, err := os.Stat(goFlagsConfig.O)
+ if err != nil {
+ panic(err)
+ }
+ if stat.IsDir() {
+ testBinPath = goFlagsConfig.O + "/" + suite.PackageName + ".test"
+ } else {
+ testBinPath = goFlagsConfig.O
+ }
+ }
+ if len(testBinPath) == 0 {
+ testBinPath = path.Join(suite.Path, suite.PackageName+".test")
+ }
+ fmt.Printf("Compiled %s\n", testBinPath)
}
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
index 2efd286088..f0e7331f7d 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
@@ -12,7 +12,7 @@ func Abort(details AbortDetails) {
panic(details)
}
-func AbortGracefullyWith(format string, args ...interface{}) {
+func AbortGracefullyWith(format string, args ...any) {
Abort(AbortDetails{
ExitCode: 0,
Error: fmt.Errorf(format, args...),
@@ -20,7 +20,7 @@ func AbortGracefullyWith(format string, args ...interface{}) {
})
}
-func AbortWith(format string, args ...interface{}) {
+func AbortWith(format string, args ...any) {
Abort(AbortDetails{
ExitCode: 1,
Error: fmt.Errorf(format, args...),
@@ -28,7 +28,7 @@ func AbortWith(format string, args ...interface{}) {
})
}
-func AbortWithUsage(format string, args ...interface{}) {
+func AbortWithUsage(format string, args ...any) {
Abort(AbortDetails{
ExitCode: 1,
Error: fmt.Errorf(format, args...),
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
index 12e0e56591..79b83a3af1 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
@@ -24,7 +24,11 @@ func (c Command) Run(args []string, additionalArgs []string) {
if err != nil {
AbortWithUsage(err.Error())
}
-
+ for _, arg := range args {
+ if len(arg) > 1 && strings.HasPrefix(arg, "-") {
+ AbortWith(types.GinkgoErrors.FlagAfterPositionalParameter().Error())
+ }
+ }
c.Command(args, additionalArgs)
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
index 88dd8d6b07..c3f6d3a11e 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
@@ -68,7 +68,6 @@ func (p Program) RunAndExit(osArgs []string) {
fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport())
}
p.Exiter(exitCode)
- return
}()
args, additionalArgs := []string{}, []string{}
@@ -157,7 +156,6 @@ func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) {
p.EmitUsage(writer)
Abort(AbortDetails{ExitCode: 1})
}
- return
}
func (p Program) EmitUsage(writer io.Writer) {
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
index 73aff0b7a1..b2dc59be66 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
@@ -7,7 +7,7 @@ import (
"os"
"text/template"
- sprig "github.com/go-task/slim-sprig"
+ sprig "github.com/go-task/slim-sprig/v3"
"github.com/onsi/ginkgo/v2/ginkgo/command"
"github.com/onsi/ginkgo/v2/ginkgo/internal"
"github.com/onsi/ginkgo/v2/types"
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
index be01dec979..cf3b7cb6d6 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
@@ -10,7 +10,7 @@ import (
"strings"
"text/template"
- sprig "github.com/go-task/slim-sprig"
+ sprig "github.com/go-task/slim-sprig/v3"
"github.com/onsi/ginkgo/v2/ginkgo/command"
"github.com/onsi/ginkgo/v2/ginkgo/internal"
"github.com/onsi/ginkgo/v2/types"
@@ -174,6 +174,7 @@ func moduleName(modRoot string) string {
if err != nil {
return ""
}
+ defer modFile.Close()
mod := make([]byte, 128)
_, err = modFile.Read(mod)
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
index 86da7340d1..7bbe6be0fc 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
@@ -11,7 +11,7 @@ import (
"github.com/onsi/ginkgo/v2/types"
)
-func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite {
+func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig, preserveSymbols bool) TestSuite {
if suite.PathToCompiledTest != "" {
return suite
}
@@ -25,6 +25,18 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite
return suite
}
+ if len(goFlagsConfig.O) > 0 {
+ userDefinedPath, err := filepath.Abs(goFlagsConfig.O)
+ if err != nil {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to compute compilation target path %s:\n%s", goFlagsConfig.O, err.Error())
+ return suite
+ }
+ path = userDefinedPath
+ }
+
+ goFlagsConfig.O = path
+
ginkgoInvocationPath, _ := os.Getwd()
ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath)
packagePath := suite.AbsPath()
@@ -34,7 +46,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite
suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error())
return suite
}
- args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath)
+ args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath, preserveSymbols)
if err != nil {
suite.State = TestSuiteStateFailedToCompile
suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error())
@@ -108,7 +120,7 @@ func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler {
}
}
-func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) {
+func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig, preserveSymbols bool) {
opc.stopped = false
opc.idx = 0
opc.numSuites = len(suites)
@@ -123,7 +135,7 @@ func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsCon
stopped := opc.stopped
opc.mutex.Unlock()
if !stopped {
- suite = CompileSuite(suite, goFlagsConfig)
+ suite = CompileSuite(suite, goFlagsConfig, preserveSymbols)
}
c <- suite
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
new file mode 100644
index 0000000000..87cfa11194
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
@@ -0,0 +1,129 @@
+// Copyright (c) 2015, Wade Simmons
+// All rights reserved.
+
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package gocovmerge takes the results from multiple `go test -coverprofile`
+// runs and merges them into one profile
+
+// this file was originally taken from the gocovmerge project
+// see also: https://go.shabbyrobe.org/gocovmerge
+package internal
+
+import (
+ "fmt"
+ "io"
+ "sort"
+
+ "golang.org/x/tools/cover"
+)
+
+func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
+ i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
+ if i < len(profiles) && profiles[i].FileName == p.FileName {
+ MergeCoverProfiles(profiles[i], p)
+ } else {
+ profiles = append(profiles, nil)
+ copy(profiles[i+1:], profiles[i:])
+ profiles[i] = p
+ }
+ return profiles
+}
+
+func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error {
+ if len(profiles) == 0 {
+ return nil
+ }
+ if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil {
+ return err
+ }
+ for _, p := range profiles {
+ for _, b := range p.Blocks {
+ if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error {
+ if into.Mode != merge.Mode {
+ return fmt.Errorf("cannot merge profiles with different modes")
+ }
+ // Since the blocks are sorted, we can keep track of where the last block
+ // was inserted and only look at the blocks after that as targets for merge
+ startIndex := 0
+ for _, b := range merge.Blocks {
+ var err error
+ startIndex, err = mergeProfileBlock(into, b, startIndex)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) {
+ sortFunc := func(i int) bool {
+ pi := p.Blocks[i+startIndex]
+ return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
+ }
+
+ i := 0
+ if !sortFunc(i) {
+ i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
+ }
+
+ i += startIndex
+ if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
+ if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
+ return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb)
+ }
+ switch p.Mode {
+ case "set":
+ p.Blocks[i].Count |= pb.Count
+ case "count", "atomic":
+ p.Blocks[i].Count += pb.Count
+ default:
+ return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode)
+ }
+
+ } else {
+ if i > 0 {
+ pa := p.Blocks[i-1]
+ if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
+ return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb)
+ }
+ }
+ if i < len(p.Blocks)-1 {
+ pa := p.Blocks[i+1]
+ if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
+ return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb)
+ }
+ }
+ p.Blocks = append(p.Blocks, cover.ProfileBlock{})
+ copy(p.Blocks[i+1:], p.Blocks[i:])
+ p.Blocks[i] = pb
+ }
+
+ return i + 1, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
index bd3c6d0287..f3439a3f0c 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
@@ -1,7 +1,6 @@
package internal
import (
- "bytes"
"fmt"
"os"
"os/exec"
@@ -12,6 +11,7 @@ import (
"github.com/google/pprof/profile"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/tools/cover"
)
func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
@@ -90,6 +90,9 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC
if reporterConfig.JSONReport != "" {
reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports})
}
+ if reporterConfig.GoJSONReport != "" {
+ reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.GoJSONReport, GenerateFunc: reporters.GenerateGoTestJSONReport, MergeFunc: reporters.MergeAndCleanupGoTestJSONReports})
+ }
if reporterConfig.JUnitReport != "" {
reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports})
}
@@ -144,38 +147,27 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC
return messages, nil
}
-//loads each profile, combines them, deletes them, stores them in destination
+// loads each profile, merges them, deletes them, stores them in destination
func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
- combined := &bytes.Buffer{}
- modeRegex := regexp.MustCompile(`^mode: .*\n`)
- for i, profile := range profiles {
- contents, err := os.ReadFile(profile)
+ var merged []*cover.Profile
+ for _, file := range profiles {
+ parsedProfiles, err := cover.ParseProfiles(file)
if err != nil {
- return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error())
- }
- os.Remove(profile)
-
- // remove the cover mode line from every file
- // except the first one
- if i > 0 {
- contents = modeRegex.ReplaceAll(contents, []byte{})
+ return err
}
-
- _, err = combined.Write(contents)
-
- // Add a newline to the end of every file if missing.
- if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
- _, err = combined.Write([]byte("\n"))
- }
-
- if err != nil {
- return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error())
+ os.Remove(file)
+ for _, p := range parsedProfiles {
+ merged = AddCoverProfile(merged, p)
}
}
-
- err := os.WriteFile(destination, combined.Bytes(), 0666)
+ dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ return err
+ }
+ defer dst.Close()
+ err = DumpCoverProfiles(merged, dst)
if err != nil {
- return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error())
+ return err
}
return nil
}
@@ -184,7 +176,7 @@ func GetCoverageFromCoverProfile(profile string) (float64, error) {
cmd := exec.Command("go", "tool", "cover", "-func", profile)
output, err := cmd.CombinedOutput()
if err != nil {
- return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error())
+ return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output))
}
re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`)
matches := re.FindStringSubmatch(string(output))
@@ -208,6 +200,7 @@ func MergeProfiles(profilePaths []string, destination string) error {
return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error())
}
prof, err := profile.Parse(proFile)
+ _ = proFile.Close()
if err != nil {
return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error())
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
index 41052ea19d..30d8096cd6 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
@@ -107,6 +107,9 @@ func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig t
if reporterConfig.JSONReport != "" {
reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
}
+ if reporterConfig.GoJSONReport != "" {
+ reporterConfig.GoJSONReport = AbsPathForGeneratedAsset(reporterConfig.GoJSONReport, suite, cliConfig, 0)
+ }
if reporterConfig.JUnitReport != "" {
reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
}
@@ -179,6 +182,9 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig
if reporterConfig.JSONReport != "" {
reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
}
+ if reporterConfig.GoJSONReport != "" {
+ reporterConfig.GoJSONReport = AbsPathForGeneratedAsset(reporterConfig.GoJSONReport, suite, cliConfig, 0)
+ }
if reporterConfig.JUnitReport != "" {
reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
index 64dcb1b78c..df99875be2 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
@@ -7,6 +7,7 @@ import (
"path"
"path/filepath"
"regexp"
+ "runtime"
"strings"
"github.com/onsi/ginkgo/v2/types"
@@ -192,7 +193,7 @@ func precompiledTestSuite(path string) (TestSuite, error) {
return TestSuite{}, errors.New("this is not a .test binary")
}
- if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 {
+ if filepath.Ext(path) == ".test" && runtime.GOOS != "windows" && info.Mode()&0111 == 0 {
return TestSuite{}, errors.New("this is not executable")
}
@@ -225,7 +226,7 @@ func suitesInDir(dir string, recurse bool) TestSuites {
files, _ := os.ReadDir(dir)
re := regexp.MustCompile(`^[^._].*_test\.go$`)
for _, file := range files {
- if !file.IsDir() && re.Match([]byte(file.Name())) {
+ if !file.IsDir() && re.MatchString(file.Name()) {
suite := TestSuite{
Path: relPath(dir),
PackageName: packageNameForSuite(dir),
@@ -240,7 +241,7 @@ func suitesInDir(dir string, recurse bool) TestSuites {
if recurse {
re = regexp.MustCompile(`^[._]`)
for _, file := range files {
- if file.IsDir() && !re.Match([]byte(file.Name())) {
+ if file.IsDir() && !re.MatchString(file.Name()) {
suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...)
}
}
@@ -271,7 +272,7 @@ func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool {
reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`)
for _, file := range files {
- if !file.IsDir() && reTestFile.Match([]byte(file.Name())) {
+ if !file.IsDir() && reTestFile.MatchString(file.Name()) {
contents, _ := os.ReadFile(dir + "/" + file.Name())
if reGinkgo.Match(contents) {
return true
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
index e9abb27d8b..419589b48c 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
@@ -3,7 +3,6 @@ package main
import (
"fmt"
"os"
-
"github.com/onsi/ginkgo/v2/ginkgo/build"
"github.com/onsi/ginkgo/v2/ginkgo/command"
"github.com/onsi/ginkgo/v2/ginkgo/generators"
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
index 958daccbfa..5d8d00bb17 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
@@ -1,10 +1,11 @@
package outline
import (
- "github.com/onsi/ginkgo/v2/types"
"go/ast"
"go/token"
"strconv"
+
+ "github.com/onsi/ginkgo/v2/types"
)
const (
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
index 67ec5ab757..f0a6b5d26c 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
@@ -28,14 +28,7 @@ func packageNameForImport(f *ast.File, path string) *string {
}
name := spec.Name.String()
if name == "" {
- // If the package name is not explicitly specified,
- // make an educated guess. This is not guaranteed to be correct.
- lastSlash := strings.LastIndex(path, "/")
- if lastSlash == -1 {
- name = path
- } else {
- name = path[lastSlash+1:]
- }
+ name = "ginkgo"
}
if name == "." {
name = ""
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
index c2327cda8c..e99d557d1f 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
@@ -1,10 +1,13 @@
package outline
import (
+ "bytes"
+ "encoding/csv"
"encoding/json"
"fmt"
"go/ast"
"go/token"
+ "strconv"
"strings"
"golang.org/x/tools/go/ast/inspector"
@@ -84,9 +87,11 @@ func (o *outline) String() string {
// StringIndent returns a CSV-formated outline, but every line is indented by
// one 'width' of spaces for every level of nesting.
func (o *outline) StringIndent(width int) string {
- var b strings.Builder
+ var b bytes.Buffer
b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n")
+ csvWriter := csv.NewWriter(&b)
+
currentIndent := 0
pre := func(n *ginkgoNode) {
b.WriteString(fmt.Sprintf("%*s", currentIndent, ""))
@@ -96,8 +101,22 @@ func (o *outline) StringIndent(width int) string {
} else {
labels = strings.Join(n.Labels, ", ")
}
- //enclosing labels in a double quoted comma separate listed so that when inmported into a CSV app the Labels column has comma separate strings
- b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels))
+
+ row := []string{
+ n.Name,
+ n.Text,
+ strconv.Itoa(n.Start),
+ strconv.Itoa(n.End),
+ strconv.FormatBool(n.Spec),
+ strconv.FormatBool(n.Focused),
+ strconv.FormatBool(n.Pending),
+ labels,
+ }
+ csvWriter.Write(row)
+
+ // Ensure we write to `b' before the next `b.WriteString()', which might be adding indentation
+ csvWriter.Flush()
+
currentIndent += width
}
post := func(n *ginkgoNode) {
@@ -106,5 +125,6 @@ func (o *outline) StringIndent(width int) string {
for _, n := range o.Nodes {
n.Walk(pre, post)
}
+
return b.String()
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
index aaed4d570e..03875b9796 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
@@ -107,7 +107,7 @@ OUTER_LOOP:
}
opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers())
- opc.StartCompiling(suites, r.goFlagsConfig)
+ opc.StartCompiling(suites, r.goFlagsConfig, false)
SUITE_LOOP:
for {
@@ -142,7 +142,7 @@ OUTER_LOOP:
}
if !endTime.IsZero() {
- r.suiteConfig.Timeout = endTime.Sub(time.Now())
+ r.suiteConfig.Timeout = time.Until(endTime)
if r.suiteConfig.Timeout <= 0 {
suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout
opc.StopAndDrain()
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
index f5ddff30fc..75cbdb4962 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
@@ -2,12 +2,9 @@ package watch
import (
"go/build"
- "regexp"
+ "strings"
)
-var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
-var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing
-
type Dependencies struct {
deps map[string]int
}
@@ -78,7 +75,7 @@ func (d Dependencies) resolveAndAdd(deps []string, depth int) {
if err != nil {
continue
}
- if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) {
+ if !pkg.Goroot && (!matchesGinkgoOrGomega(pkg.Dir) || matchesGinkgoIntegration(pkg.Dir)) {
d.addDepIfNotPresent(pkg.Dir, depth)
}
}
@@ -90,3 +87,11 @@ func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
d.deps[dep] = depth
}
}
+
+func matchesGinkgoOrGomega(s string) bool {
+ return strings.Contains(s, "github.com/onsi/ginkgo") || strings.Contains(s, "github.com/onsi/gomega")
+}
+
+func matchesGinkgoIntegration(s string) bool {
+ return strings.Contains(s, "github.com/onsi/ginkgo/integration") // allow us to integration test this thing
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
index e9f7ec0cb3..0e6ae1f290 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"regexp"
+ "strings"
"time"
)
@@ -79,7 +80,11 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
continue
}
- if goTestRegExp.Match([]byte(info.Name())) {
+ if isHiddenFile(info) {
+ continue
+ }
+
+ if goTestRegExp.MatchString(info.Name()) {
testHash += p.hashForFileInfo(info)
if info.ModTime().After(testModifiedTime) {
testModifiedTime = info.ModTime()
@@ -87,7 +92,7 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
continue
}
- if p.watchRegExp.Match([]byte(info.Name())) {
+ if p.watchRegExp.MatchString(info.Name()) {
codeHash += p.hashForFileInfo(info)
if info.ModTime().After(codeModifiedTime) {
codeModifiedTime = info.ModTime()
@@ -103,6 +108,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
return
}
+func isHiddenFile(info os.FileInfo) bool {
+ return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_")
+}
+
func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
index bde4193ce7..fe1ca30519 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
@@ -153,7 +153,7 @@ func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
}
func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite {
- suite = internal.CompileSuite(suite, w.goFlagsConfig)
+ suite = internal.CompileSuite(suite, w.goFlagsConfig, false)
if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
fmt.Println(suite.CompilationError.Error())
return suite
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
index 28447ffdd2..40d1e1ab5c 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
@@ -1,7 +1,12 @@
package ginkgo
import (
+ "context"
+ "io"
+ "testing"
+
"github.com/onsi/ginkgo/v2/internal/testingtproxy"
+ "github.com/onsi/ginkgo/v2/types"
)
/*
@@ -12,10 +17,15 @@ GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's
GinkgoT() takes an optional offset argument that can be used to get the
correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately
+GinkgoT() attempts to mimic the behavior of `testing.T` with the exception of the following:
+
+- Error/Errorf: failures in Ginkgo always immediately stop execution and there is no mechanism to log a failure without aborting the test. As such Error/Errorf are equivalent to Fatal/Fatalf.
+- Parallel() is a no-op as Ginkgo's multi-process parallelism model is substantially different from go test's in-process model.
+
You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries
*/
func GinkgoT(optionalOffset ...int) FullGinkgoTInterface {
- offset := 3
+ offset := 1
if len(optionalOffset) > 0 {
offset = optionalOffset[0]
}
@@ -40,24 +50,28 @@ The portion of the interface returned by GinkgoT() that maps onto methods in the
*/
type GinkgoTInterface interface {
Cleanup(func())
+ Chdir(dir string)
+ Context() context.Context
Setenv(kev, value string)
- Error(args ...interface{})
- Errorf(format string, args ...interface{})
+ Error(args ...any)
+ Errorf(format string, args ...any)
Fail()
FailNow()
Failed() bool
- Fatal(args ...interface{})
- Fatalf(format string, args ...interface{})
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
Helper()
- Log(args ...interface{})
- Logf(format string, args ...interface{})
+ Log(args ...any)
+ Logf(format string, args ...any)
Name() string
Parallel()
- Skip(args ...interface{})
+ Skip(args ...any)
SkipNow()
- Skipf(format string, args ...interface{})
+ Skipf(format string, args ...any)
Skipped() bool
TempDir() string
+ Attr(key, value string)
+ Output() io.Writer
}
/*
@@ -71,9 +85,9 @@ type FullGinkgoTInterface interface {
AddReportEntryVisibilityNever(name string, args ...any)
//Prints to the GinkgoWriter
- Print(a ...interface{})
- Printf(format string, a ...interface{})
- Println(a ...interface{})
+ Print(a ...any)
+ Printf(format string, a ...any)
+ Println(a ...any)
//Provides access to Ginkgo's color formatting, correctly configured to match the color settings specified in the invocation of ginkgo
F(format string, args ...any) string
@@ -92,3 +106,93 @@ type FullGinkgoTInterface interface {
AttachProgressReporter(func() string) func()
}
+
+/*
+GinkgoTB() implements a wrapper that exactly matches the testing.TB interface.
+
+In go 1.18 a new private() function was added to the testing.TB interface. Any function which accepts testing.TB as input needs to be passed in something that directly implements testing.TB.
+
+This wrapper satisfies the testing.TB interface and intended to be used as a drop-in replacement with third party libraries that accept testing.TB.
+
+Similar to GinkgoT(), GinkgoTB() takes an optional offset argument that can be used to get the
+correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately
+*/
+func GinkgoTB(optionalOffset ...int) *GinkgoTBWrapper {
+ offset := 2
+ if len(optionalOffset) > 0 {
+ offset = optionalOffset[0]
+ }
+ return &GinkgoTBWrapper{GinkgoT: GinkgoT(offset)}
+}
+
+type GinkgoTBWrapper struct {
+ testing.TB
+ GinkgoT FullGinkgoTInterface
+}
+
+func (g *GinkgoTBWrapper) Cleanup(f func()) {
+ g.GinkgoT.Cleanup(f)
+}
+func (g *GinkgoTBWrapper) Chdir(dir string) {
+ g.GinkgoT.Chdir(dir)
+}
+func (g *GinkgoTBWrapper) Context() context.Context {
+ return g.GinkgoT.Context()
+}
+func (g *GinkgoTBWrapper) Error(args ...any) {
+ g.GinkgoT.Error(args...)
+}
+func (g *GinkgoTBWrapper) Errorf(format string, args ...any) {
+ g.GinkgoT.Errorf(format, args...)
+}
+func (g *GinkgoTBWrapper) Fail() {
+ g.GinkgoT.Fail()
+}
+func (g *GinkgoTBWrapper) FailNow() {
+ g.GinkgoT.FailNow()
+}
+func (g *GinkgoTBWrapper) Failed() bool {
+ return g.GinkgoT.Failed()
+}
+func (g *GinkgoTBWrapper) Fatal(args ...any) {
+ g.GinkgoT.Fatal(args...)
+}
+func (g *GinkgoTBWrapper) Fatalf(format string, args ...any) {
+ g.GinkgoT.Fatalf(format, args...)
+}
+func (g *GinkgoTBWrapper) Helper() {
+ types.MarkAsHelper(1)
+}
+func (g *GinkgoTBWrapper) Log(args ...any) {
+ g.GinkgoT.Log(args...)
+}
+func (g *GinkgoTBWrapper) Logf(format string, args ...any) {
+ g.GinkgoT.Logf(format, args...)
+}
+func (g *GinkgoTBWrapper) Name() string {
+ return g.GinkgoT.Name()
+}
+func (g *GinkgoTBWrapper) Setenv(key, value string) {
+ g.GinkgoT.Setenv(key, value)
+}
+func (g *GinkgoTBWrapper) Skip(args ...any) {
+ g.GinkgoT.Skip(args...)
+}
+func (g *GinkgoTBWrapper) SkipNow() {
+ g.GinkgoT.SkipNow()
+}
+func (g *GinkgoTBWrapper) Skipf(format string, args ...any) {
+ g.GinkgoT.Skipf(format, args...)
+}
+func (g *GinkgoTBWrapper) Skipped() bool {
+ return g.GinkgoT.Skipped()
+}
+func (g *GinkgoTBWrapper) TempDir() string {
+ return g.GinkgoT.TempDir()
+}
+func (g *GinkgoTBWrapper) Attr(key, value string) {
+ g.GinkgoT.Attr(key, value)
+}
+func (g *GinkgoTBWrapper) Output() io.Writer {
+ return g.GinkgoT.Output()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go b/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go
new file mode 100644
index 0000000000..c965710205
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go
@@ -0,0 +1,34 @@
+package internal
+
+import (
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func ComputeAroundNodes(specs Specs) Specs {
+ out := Specs{}
+ for _, spec := range specs {
+ nodes := Nodes{}
+ currentNestingLevel := 0
+ aroundNodes := types.AroundNodes{}
+ nestingLevelIndices := []int{}
+ for _, node := range spec.Nodes {
+ switch node.NodeType {
+ case types.NodeTypeContainer:
+ currentNestingLevel = node.NestingLevel + 1
+ nestingLevelIndices = append(nestingLevelIndices, len(aroundNodes))
+ aroundNodes = aroundNodes.Append(node.AroundNodes...)
+ nodes = append(nodes, node)
+ default:
+ if currentNestingLevel > node.NestingLevel {
+ currentNestingLevel = node.NestingLevel
+ aroundNodes = aroundNodes[:nestingLevelIndices[currentNestingLevel]]
+ }
+ node.AroundNodes = types.AroundNodes{}.Append(aroundNodes...).Append(node.AroundNodes...)
+ nodes = append(nodes, node)
+ }
+ }
+ spec.Nodes = nodes
+ out = append(out, spec)
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/failer.go b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
index e9bd9565fc..8c5de9c160 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
@@ -32,7 +32,7 @@ func (f *Failer) GetFailure() types.Failure {
return f.failure
}
-func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
+func (f *Failer) Panic(location types.CodeLocation, forwardedPanic any) {
f.lock.Lock()
defer f.lock.Unlock()
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
index e3da7d14dd..a39daf5a60 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
@@ -56,7 +56,7 @@ This function sets the `Skip` property on specs by applying Ginkgo's focus polic
*Note:* specs with pending nodes are Skipped when created by NewSpec.
*/
-func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) {
+func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteConfig types.SuiteConfig) (Specs, bool) {
focusString := strings.Join(suiteConfig.FocusStrings, "|")
skipString := strings.Join(suiteConfig.SkipStrings, "|")
@@ -84,6 +84,13 @@ func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suit
})
}
+ if suiteConfig.SemVerFilter != "" {
+ semVerFilter, _ := types.ParseSemVerFilter(suiteConfig.SemVerFilter)
+ skipChecks = append(skipChecks, func(spec Spec) bool {
+ return !semVerFilter(UnionOfSemVerConstraints(suiteSemVerConstraints, spec.Nodes.UnionOfSemVerConstraints()))
+ })
+ }
+
if len(suiteConfig.FocusFiles) > 0 {
focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles)
skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) })
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go
index 02c9fe4fcd..cc794903e7 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/group.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go
@@ -110,21 +110,53 @@ func newGroup(suite *Suite) *group {
}
}
+// initialReportForSpec constructs a new SpecReport right before running the spec.
func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
return types.SpecReport{
- ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
- ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
- ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
- LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
- LeafNodeType: types.NodeTypeIt,
- LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
- LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
- ParallelProcess: g.suite.config.ParallelProcess,
- RunningInParallel: g.suite.isRunningInParallel(),
- IsSerial: spec.Nodes.HasNodeMarkedSerial(),
- IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
- MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(),
- MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(),
+ ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
+ ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
+ ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
+ ContainerHierarchySemVerConstraints: spec.Nodes.WithType(types.NodeTypeContainer).SemVerConstraints(),
+ LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
+ LeafNodeType: types.NodeTypeIt,
+ LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
+ LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
+ LeafNodeSemVerConstraints: []string(spec.FirstNodeWithType(types.NodeTypeIt).SemVerConstraints),
+ ParallelProcess: g.suite.config.ParallelProcess,
+ RunningInParallel: g.suite.isRunningInParallel(),
+ IsSerial: spec.Nodes.HasNodeMarkedSerial(),
+ IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
+ MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(),
+ MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(),
+ SpecPriority: spec.Nodes.GetSpecPriority(),
+ }
+}
+
+// constructionNodeReportForTreeNode constructs a new SpecReport right before invoking the body
+// of a container node during construction of the full tree.
+func constructionNodeReportForTreeNode(node *TreeNode) *types.ConstructionNodeReport {
+ var report types.ConstructionNodeReport
+ // Walk up the tree and set attributes accordingly.
+ addNodeToReportForNode(&report, node)
+ return &report
+}
+
+// addNodeToReportForNode is conceptually similar to initialReportForSpec and therefore placed here
+// although it doesn't do anything with a group.
+func addNodeToReportForNode(report *types.ConstructionNodeReport, node *TreeNode) {
+ if node.Parent != nil {
+ // First add the parent node, then the current one.
+ addNodeToReportForNode(report, node.Parent)
+ }
+ report.ContainerHierarchyTexts = append(report.ContainerHierarchyTexts, node.Node.Text)
+ report.ContainerHierarchyLocations = append(report.ContainerHierarchyLocations, node.Node.CodeLocation)
+ report.ContainerHierarchyLabels = append(report.ContainerHierarchyLabels, node.Node.Labels)
+ report.ContainerHierarchySemVerConstraints = append(report.ContainerHierarchySemVerConstraints, node.Node.SemVerConstraints)
+ if node.Node.MarkedSerial {
+ report.IsSerial = true
+ }
+ if node.Node.MarkedOrdered {
+ report.IsInOrderedContainer = true
}
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
index 8ed86111f7..79bfa87db2 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
@@ -40,7 +40,7 @@ func (ic InterruptCause) String() string {
}
type InterruptStatus struct {
- Channel chan interface{}
+ Channel chan any
Level InterruptLevel
Cause InterruptCause
}
@@ -62,14 +62,14 @@ type InterruptHandlerInterface interface {
}
type InterruptHandler struct {
- c chan interface{}
+ c chan any
lock *sync.Mutex
level InterruptLevel
cause InterruptCause
client parallel_support.Client
- stop chan interface{}
+ stop chan any
signals []os.Signal
- requestAbortCheck chan interface{}
+ requestAbortCheck chan any
}
func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler {
@@ -77,10 +77,10 @@ func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *
signals = []os.Signal{os.Interrupt, syscall.SIGTERM}
}
handler := &InterruptHandler{
- c: make(chan interface{}),
+ c: make(chan any),
lock: &sync.Mutex{},
- stop: make(chan interface{}),
- requestAbortCheck: make(chan interface{}),
+ stop: make(chan any),
+ requestAbortCheck: make(chan any),
client: client,
signals: signals,
}
@@ -98,9 +98,9 @@ func (handler *InterruptHandler) registerForInterrupts() {
signal.Notify(signalChannel, handler.signals...)
// cross-process abort handling
- var abortChannel chan interface{}
+ var abortChannel chan any
if handler.client != nil {
- abortChannel = make(chan interface{})
+ abortChannel = make(chan any)
go func() {
pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL)
for {
@@ -125,7 +125,7 @@ func (handler *InterruptHandler) registerForInterrupts() {
}()
}
- go func(abortChannel chan interface{}) {
+ go func(abortChannel chan any) {
var interruptCause InterruptCause
for {
select {
@@ -151,7 +151,7 @@ func (handler *InterruptHandler) registerForInterrupts() {
}
if handler.level != oldLevel {
close(handler.c)
- handler.c = make(chan interface{})
+ handler.c = make(chan any)
}
handler.lock.Unlock()
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
index 16f0dc2278..2bccec2dbf 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
@@ -4,10 +4,10 @@ import (
"context"
"fmt"
"reflect"
+ "slices"
"sort"
- "time"
-
"sync"
+ "time"
"github.com/onsi/ginkgo/v2/types"
)
@@ -16,8 +16,8 @@ var _global_node_id_counter = uint(0)
var _global_id_mutex = &sync.Mutex{}
func UniqueNodeID() uint {
- //There's a reace in the internal integration tests if we don't make
- //accessing _global_node_id_counter safe across goroutines.
+ // There's a reace in the internal integration tests if we don't make
+ // accessing _global_node_id_counter safe across goroutines.
_global_id_mutex.Lock()
defer _global_id_mutex.Unlock()
_global_node_id_counter += 1
@@ -44,23 +44,27 @@ type Node struct {
SynchronizedAfterSuiteProc1Body func(SpecContext)
SynchronizedAfterSuiteProc1BodyHasContext bool
- ReportEachBody func(types.SpecReport)
- ReportSuiteBody func(types.Report)
-
- MarkedFocus bool
- MarkedPending bool
- MarkedSerial bool
- MarkedOrdered bool
- MarkedContinueOnFailure bool
- MarkedOncePerOrdered bool
- FlakeAttempts int
- MustPassRepeatedly int
- Labels Labels
- PollProgressAfter time.Duration
- PollProgressInterval time.Duration
- NodeTimeout time.Duration
- SpecTimeout time.Duration
- GracePeriod time.Duration
+ ReportEachBody func(SpecContext, types.SpecReport)
+ ReportSuiteBody func(SpecContext, types.Report)
+
+ MarkedFocus bool
+ MarkedPending bool
+ MarkedSerial bool
+ MarkedOrdered bool
+ MarkedContinueOnFailure bool
+ MarkedOncePerOrdered bool
+ FlakeAttempts int
+ MustPassRepeatedly int
+ Labels Labels
+ SemVerConstraints SemVerConstraints
+ PollProgressAfter time.Duration
+ PollProgressInterval time.Duration
+ NodeTimeout time.Duration
+ SpecTimeout time.Duration
+ GracePeriod time.Duration
+ AroundNodes types.AroundNodes
+ HasExplicitlySetSpecPriority bool
+ SpecPriority int
NodeIDWhereCleanupWasGenerated uint
}
@@ -85,35 +89,51 @@ const SuppressProgressReporting = suppressProgressReporting(true)
type FlakeAttempts uint
type MustPassRepeatedly uint
type Offset uint
-type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing
-type Labels []string
+type Done chan<- any // Deprecated Done Channel for asynchronous testing
type PollProgressInterval time.Duration
type PollProgressAfter time.Duration
type NodeTimeout time.Duration
type SpecTimeout time.Duration
type GracePeriod time.Duration
+type SpecPriority int
+
+type Labels []string
func (l Labels) MatchesLabelFilter(query string) bool {
return types.MustParseLabelFilter(query)(l)
}
-func UnionOfLabels(labels ...Labels) Labels {
- out := Labels{}
- seen := map[string]bool{}
- for _, labelSet := range labels {
- for _, label := range labelSet {
- if !seen[label] {
- seen[label] = true
- out = append(out, label)
+type SemVerConstraints []string
+
+func (svc SemVerConstraints) MatchesSemVerFilter(version string) bool {
+ return types.MustParseSemVerFilter(version)(svc)
+}
+
+func unionOf[S ~[]E, E comparable](slices ...S) S {
+ out := S{}
+ seen := map[E]bool{}
+ for _, slice := range slices {
+ for _, item := range slice {
+ if !seen[item] {
+ seen[item] = true
+ out = append(out, item)
}
}
}
return out
}
-func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) {
- decorations := []interface{}{}
- remainingArgs := []interface{}{}
+func UnionOfLabels(labels ...Labels) Labels {
+ return unionOf(labels...)
+}
+
+func UnionOfSemVerConstraints(semVerConstraints ...SemVerConstraints) SemVerConstraints {
+ return unionOf(semVerConstraints...)
+}
+
+func PartitionDecorations(args ...any) ([]any, []any) {
+ decorations := []any{}
+ remainingArgs := []any{}
for _, arg := range args {
if isDecoration(arg) {
decorations = append(decorations, arg)
@@ -124,7 +144,7 @@ func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) {
return decorations, remainingArgs
}
-func isDecoration(arg interface{}) bool {
+func isDecoration(arg any) bool {
switch t := reflect.TypeOf(arg); {
case t == nil:
return false
@@ -152,6 +172,8 @@ func isDecoration(arg interface{}) bool {
return true
case t == reflect.TypeOf(Labels{}):
return true
+ case t == reflect.TypeOf(SemVerConstraints{}):
+ return true
case t == reflect.TypeOf(PollProgressInterval(0)):
return true
case t == reflect.TypeOf(PollProgressAfter(0)):
@@ -162,6 +184,10 @@ func isDecoration(arg interface{}) bool {
return true
case t == reflect.TypeOf(GracePeriod(0)):
return true
+ case t == reflect.TypeOf(types.AroundNodeDecorator{}):
+ return true
+ case t == reflect.TypeOf(SpecPriority(0)):
+ return true
case t.Kind() == reflect.Slice && isSliceOfDecorations(arg):
return true
default:
@@ -169,7 +195,7 @@ func isDecoration(arg interface{}) bool {
}
}
-func isSliceOfDecorations(slice interface{}) bool {
+func isSliceOfDecorations(slice any) bool {
vSlice := reflect.ValueOf(slice)
if vSlice.Len() == 0 {
return false
@@ -185,13 +211,14 @@ func isSliceOfDecorations(slice interface{}) bool {
var contextType = reflect.TypeOf(new(context.Context)).Elem()
var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
-func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) {
+func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...any) (Node, []error) {
baseOffset := 2
node := Node{
ID: UniqueNodeID(),
NodeType: nodeType,
Text: text,
Labels: Labels{},
+ SemVerConstraints: SemVerConstraints{},
CodeLocation: types.NewCodeLocation(baseOffset),
NestingLevel: -1,
PollProgressAfter: -1,
@@ -206,10 +233,10 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
}
}
- args = unrollInterfaceSlice(args)
+ args = UnrollInterfaceSlice(args)
- remainingArgs := []interface{}{}
- //First get the CodeLocation up-to-date
+ remainingArgs := []any{}
+ // First get the CodeLocation up-to-date
for _, arg := range args {
switch v := arg.(type) {
case Offset:
@@ -222,14 +249,15 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
}
labelsSeen := map[string]bool{}
+ semVerConstraintsSeen := map[string]bool{}
trackedFunctionError := false
args = remainingArgs
- remainingArgs = []interface{}{}
- //now process the rest of the args
+ remainingArgs = []any{}
+ // now process the rest of the args
for _, arg := range args {
switch t := reflect.TypeOf(arg); {
case t == reflect.TypeOf(float64(0)):
- break //ignore deprecated timeouts
+ break // ignore deprecated timeouts
case t == reflect.TypeOf(Focus):
node.MarkedFocus = bool(arg.(focusType))
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
@@ -242,6 +270,9 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
}
case t == reflect.TypeOf(Serial):
node.MarkedSerial = bool(arg.(serialType))
+ if !labelsSeen["Serial"] {
+ node.Labels = append(node.Labels, "Serial")
+ }
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Serial"))
}
@@ -297,6 +328,14 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
if nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod"))
}
+ case t == reflect.TypeOf(SpecPriority(0)):
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecPriority"))
+ }
+ node.SpecPriority = int(arg.(SpecPriority))
+ node.HasExplicitlySetSpecPriority = true
+ case t == reflect.TypeOf(types.AroundNodeDecorator{}):
+ node.AroundNodes = append(node.AroundNodes, arg.(types.AroundNodeDecorator))
case t == reflect.TypeOf(Labels{}):
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label"))
@@ -309,6 +348,18 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
appendError(err)
}
}
+ case t == reflect.TypeOf(SemVerConstraints{}):
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SemVerConstraint"))
+ }
+ for _, semVerConstraint := range arg.(SemVerConstraints) {
+ if !semVerConstraintsSeen[semVerConstraint] {
+ semVerConstraintsSeen[semVerConstraint] = true
+ semVerConstraint, err := types.ValidateAndCleanupSemVerConstraint(semVerConstraint, node.CodeLocation)
+ node.SemVerConstraints = append(node.SemVerConstraints, semVerConstraint)
+ appendError(err)
+ }
+ }
case t.Kind() == reflect.Func:
if nodeType.Is(types.NodeTypeContainer) {
if node.Body != nil {
@@ -325,7 +376,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
node.Body = func(SpecContext) { body() }
} else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) {
if node.ReportEachBody == nil {
- node.ReportEachBody = arg.(func(types.SpecReport))
+ if fn, ok := arg.(func(types.SpecReport)); ok {
+ node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) }
+ } else {
+ node.ReportEachBody = arg.(func(SpecContext, types.SpecReport))
+ node.HasContext = true
+ }
} else {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
@@ -333,7 +389,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
}
} else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
if node.ReportSuiteBody == nil {
- node.ReportSuiteBody = arg.(func(types.Report))
+ if fn, ok := arg.(func(types.Report)); ok {
+ node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) }
+ } else {
+ node.ReportSuiteBody = arg.(func(SpecContext, types.Report))
+ node.HasContext = true
+ }
} else {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
@@ -395,7 +456,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
}
}
- //validations
+ // validations
if node.MarkedPending && node.MarkedFocus {
appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
}
@@ -439,7 +500,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
var doneType = reflect.TypeOf(make(Done))
-func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) {
+func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg any) (func(SpecContext), bool) {
t := reflect.TypeOf(arg)
if t.NumOut() > 0 || t.NumIn() > 1 {
return nil, false
@@ -465,7 +526,7 @@ func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.
var byteType = reflect.TypeOf([]byte{})
-func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) {
+func extractSynchronizedBeforeSuiteProc1Body(arg any) (func(SpecContext) []byte, bool) {
t := reflect.TypeOf(arg)
v := reflect.ValueOf(arg)
@@ -493,7 +554,7 @@ func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext)
}, hasContext
}
-func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) {
+func extractSynchronizedBeforeSuiteAllProcsBody(arg any) (func(SpecContext, []byte), bool) {
t := reflect.TypeOf(arg)
v := reflect.ValueOf(arg)
hasContext, hasByte := false, false
@@ -524,11 +585,11 @@ func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecConte
var errInterface = reflect.TypeOf((*error)(nil)).Elem()
-func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) {
+func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...any) (Node, []error) {
decorations, remainingArgs := PartitionDecorations(args...)
baseOffset := 2
cl := types.NewCodeLocation(baseOffset)
- finalArgs := []interface{}{}
+ finalArgs := []any{}
for _, arg := range decorations {
switch t := reflect.TypeOf(arg); {
case t == reflect.TypeOf(Offset(0)):
@@ -587,7 +648,7 @@ func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(stri
})
}
- return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...)
+ return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs)
}
func (n Node) IsZero() bool {
@@ -812,6 +873,32 @@ func (n Nodes) UnionOfLabels() []string {
return out
}
+func (n Nodes) SemVerConstraints() [][]string {
+ out := make([][]string, len(n))
+ for i := range n {
+ if n[i].SemVerConstraints == nil {
+ out[i] = []string{}
+ } else {
+ out[i] = []string(n[i].SemVerConstraints)
+ }
+ }
+ return out
+}
+
+func (n Nodes) UnionOfSemVerConstraints() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for i := range n {
+ for _, constraint := range n[i].SemVerConstraints {
+ if !seen[constraint] {
+ seen[constraint] = true
+ out = append(out, constraint)
+ }
+ }
+ }
+ return out
+}
+
func (n Nodes) CodeLocations() []types.CodeLocation {
out := make([]types.CodeLocation, len(n))
for i := range n {
@@ -908,19 +995,84 @@ func (n Nodes) GetMaxMustPassRepeatedly() int {
return maxMustPassRepeatedly
}
-func unrollInterfaceSlice(args interface{}) []interface{} {
+func (n Nodes) GetSpecPriority() int {
+ for i := len(n) - 1; i >= 0; i-- {
+ if n[i].HasExplicitlySetSpecPriority {
+ return n[i].SpecPriority
+ }
+ }
+ return 0
+}
+
+func UnrollInterfaceSlice(args any) []any {
v := reflect.ValueOf(args)
if v.Kind() != reflect.Slice {
- return []interface{}{args}
+ return []any{args}
}
- out := []interface{}{}
+ out := []any{}
for i := 0; i < v.Len(); i++ {
el := reflect.ValueOf(v.Index(i).Interface())
- if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) {
- out = append(out, unrollInterfaceSlice(el.Interface())...)
+ if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) && el.Type() != reflect.TypeOf(SemVerConstraints{}) {
+ out = append(out, UnrollInterfaceSlice(el.Interface())...)
} else {
out = append(out, v.Index(i).Interface())
}
}
return out
}
+
+type NodeArgsTransformer func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error)
+
+func AddTreeConstructionNodeArgsTransformer(transformer NodeArgsTransformer) func() {
+ id := nodeArgsTransformerCounter
+ nodeArgsTransformerCounter++
+ nodeArgsTransformers = append(nodeArgsTransformers, registeredNodeArgsTransformer{id, transformer})
+ return func() {
+ nodeArgsTransformers = slices.DeleteFunc(nodeArgsTransformers, func(transformer registeredNodeArgsTransformer) bool {
+ return transformer.id == id
+ })
+ }
+}
+
+var (
+ nodeArgsTransformerCounter int64
+ nodeArgsTransformers []registeredNodeArgsTransformer
+)
+
+type registeredNodeArgsTransformer struct {
+ id int64
+ transformer NodeArgsTransformer
+}
+
+// TransformNewNodeArgs is the helper for DSL functions which handles NodeArgsTransformers.
+//
+// Its return valus are intentionally the same as the internal.NewNode parameters,
+// which makes it possible to chain the invocations:
+//
+// NewNode(transformNewNodeArgs(...))
+func TransformNewNodeArgs(exitIfErrors func([]error), deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...any) (*types.DeprecationTracker, types.NodeType, string, []any) {
+ var errs []error
+
+ // Most recent first...
+ //
+ // This intentionally doesn't use slices.Backward because
+ // using iterators influences stack unwinding.
+ for i := len(nodeArgsTransformers) - 1; i >= 0; i-- {
+ transformer := nodeArgsTransformers[i].transformer
+ args = UnrollInterfaceSlice(args)
+
+ // We do not really need to recompute this on additional loop iterations,
+ // but its fast and simpler this way.
+ var offset Offset
+ for _, arg := range args {
+ if o, ok := arg.(Offset); ok {
+ offset = o
+ }
+ }
+ offset += 3 // The DSL function, this helper, and the TransformNodeArgs implementation.
+
+ text, args, errs = transformer(nodeType, offset, text, args)
+ exitIfErrors(errs)
+ }
+ return deprecationTracker, nodeType, text, args
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
index 84eea0a59e..da58d54f95 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
@@ -125,7 +125,7 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
// pick out a representative spec
representativeSpec := specs[executionGroups[groupID][0]]
- // and grab the node on the spec that will represent which shufflable group this execution group belongs tu
+ // and grab the node on the spec that will represent which shufflable group this execution group belongs to
shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle)
//add the execution group to its shufflable group
@@ -138,14 +138,35 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
}
}
+ // now, for each shuffleable group, we compute the priority
+ shufflableGroupingIDPriorities := map[uint]int{}
+ for shufflableGroupingID, groupIDs := range shufflableGroupingIDToGroupIDs {
+ // the priority of a shufflable grouping is the max priority of any spec in any execution group in the shufflable grouping
+ maxPriority := -1 << 31 // min int
+ for _, groupID := range groupIDs {
+ for _, specIdx := range executionGroups[groupID] {
+ specPriority := specs[specIdx].Nodes.GetSpecPriority()
+ maxPriority = max(specPriority, maxPriority)
+ }
+ }
+ shufflableGroupingIDPriorities[shufflableGroupingID] = maxPriority
+ }
+
// now we permute the sorted shufflable grouping IDs and build the ordered Groups
- orderedGroups := GroupedSpecIndices{}
permutation := r.Perm(len(shufflableGroupingIDs))
- for _, j := range permutation {
- //let's get the execution group IDs for this shufflable group:
- executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]]
- // and we'll add their associated specindices to the orderedGroups slice:
- for _, executionGroupID := range executionGroupIDsForJ {
+ shuffledGroupingIds := make([]uint, len(shufflableGroupingIDs))
+ for i, j := range permutation {
+ shuffledGroupingIds[i] = shufflableGroupingIDs[j]
+ }
+ // now, we need to stable sort the shuffledGroupingIds by priority (higher priority first)
+ sort.SliceStable(shuffledGroupingIds, func(i, j int) bool {
+ return shufflableGroupingIDPriorities[shuffledGroupingIds[i]] > shufflableGroupingIDPriorities[shuffledGroupingIds[j]]
+ })
+
+ // we can now take these prioritized, shuffled, groupings and form the final set of ordered spec groups
+ orderedGroups := GroupedSpecIndices{}
+ for _, id := range shuffledGroupingIds {
+ for _, executionGroupID := range shufflableGroupingIDToGroupIDs[id] {
orderedGroups = append(orderedGroups, executionGroups[executionGroupID])
}
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
index 4a1c094612..5598f15cbb 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
@@ -69,7 +69,7 @@ type pipePair struct {
writer *os.File
}
-func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) {
+func startPipeFactory(pipeChannel chan pipePair, shutdown chan any) {
for {
//make the next pipe...
pair := pipePair{}
@@ -101,8 +101,8 @@ type genericOutputInterceptor struct {
stderrClone *os.File
pipe pipePair
- shutdown chan interface{}
- emergencyBailout chan interface{}
+ shutdown chan any
+ emergencyBailout chan any
pipeChannel chan pipePair
interceptedContent chan string
@@ -139,7 +139,7 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
interceptor.intercepting = true
if interceptor.stdoutClone == nil {
interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones()
- interceptor.shutdown = make(chan interface{})
+ interceptor.shutdown = make(chan any)
go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown)
}
@@ -147,13 +147,13 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
// we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running
interceptor.pipe = <-interceptor.pipeChannel
- interceptor.emergencyBailout = make(chan interface{})
+ interceptor.emergencyBailout = make(chan any)
//Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting
go func() {
buffer := &bytes.Buffer{}
destination := io.MultiWriter(buffer, interceptor.forwardTo)
- copyFinished := make(chan interface{})
+ copyFinished := make(chan any)
reader := interceptor.pipe.reader
go func() {
io.Copy(destination, reader)
@@ -224,7 +224,7 @@ func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor {
return &genericOutputInterceptor{
interceptedContent: make(chan string),
pipeChannel: make(chan pipePair),
- shutdown: make(chan interface{}),
+ shutdown: make(chan any),
implementation: &osGlobalReassigningOutputInterceptorImpl{},
}
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
index 8a237f4463..e0f1431d51 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
@@ -13,7 +13,7 @@ func NewOutputInterceptor() OutputInterceptor {
return &genericOutputInterceptor{
interceptedContent: make(chan string),
pipeChannel: make(chan pipePair),
- shutdown: make(chan interface{}),
+ shutdown: make(chan any),
implementation: &dupSyscallOutputInterceptorImpl{},
}
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go
new file mode 100644
index 0000000000..4c374935b8
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go
@@ -0,0 +1,7 @@
+//go:build wasm
+
+package internal
+
+func NewOutputInterceptor() OutputInterceptor {
+ return &NoopOutputInterceptor{}
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
index b3cd64292a..4234d802cf 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
@@ -30,7 +30,7 @@ type Server interface {
Close()
Address() string
RegisterAlive(node int, alive func() bool)
- GetSuiteDone() chan interface{}
+ GetSuiteDone() chan any
GetOutputDestination() io.Writer
SetOutputDestination(io.Writer)
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
index 6547c7a66e..4aa10ae4f9 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
@@ -34,7 +34,7 @@ func (client *httpClient) Close() error {
return nil
}
-func (client *httpClient) post(path string, data interface{}) error {
+func (client *httpClient) post(path string, data any) error {
var body io.Reader
if data != nil {
encoded, err := json.Marshal(data)
@@ -54,7 +54,7 @@ func (client *httpClient) post(path string, data interface{}) error {
return nil
}
-func (client *httpClient) poll(path string, data interface{}) error {
+func (client *httpClient) poll(path string, data any) error {
for {
resp, err := http.Get(client.serverHost + path)
if err != nil {
@@ -153,10 +153,7 @@ func (client *httpClient) PostAbort() error {
func (client *httpClient) ShouldAbort() bool {
err := client.poll("/abort", nil)
- if err == ErrorGone {
- return true
- }
- return false
+ return err == ErrorGone
}
func (client *httpClient) Write(p []byte) (int, error) {
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
index d2c71ab1b2..8a1b7a5bbe 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
@@ -75,7 +75,7 @@ func (server *httpServer) Address() string {
return "http://" + server.listener.Addr().String()
}
-func (server *httpServer) GetSuiteDone() chan interface{} {
+func (server *httpServer) GetSuiteDone() chan any {
return server.handler.done
}
@@ -96,7 +96,7 @@ func (server *httpServer) RegisterAlive(node int, alive func() bool) {
//
// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
-func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool {
+func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object any) bool {
defer request.Body.Close()
if json.NewDecoder(request.Body).Decode(object) != nil {
writer.WriteHeader(http.StatusBadRequest)
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
index 59e8e6fd0a..bb4675a02c 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
@@ -35,7 +35,7 @@ func (client *rpcClient) Close() error {
return client.client.Close()
}
-func (client *rpcClient) poll(method string, data interface{}) error {
+func (client *rpcClient) poll(method string, data any) error {
for {
err := client.client.Call(method, voidSender, data)
if err == nil {
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
index 2620fd562d..1574f99ac4 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
@@ -25,7 +25,7 @@ type RPCServer struct {
handler *ServerHandler
}
-//Create a new server, automatically selecting a port
+// Create a new server, automatically selecting a port
func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
@@ -37,7 +37,7 @@ func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, e
}, nil
}
-//Start the server. You don't need to `go s.Start()`, just `s.Start()`
+// Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *RPCServer) Start() {
rpcServer := rpc.NewServer()
rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server
@@ -48,17 +48,17 @@ func (server *RPCServer) Start() {
go httpServer.Serve(server.listener)
}
-//Stop the server
+// Stop the server
func (server *RPCServer) Close() {
server.listener.Close()
}
-//The address the server can be reached it. Pass this into the `ForwardingReporter`.
+// The address the server can be reached it. Pass this into the `ForwardingReporter`.
func (server *RPCServer) Address() string {
return server.listener.Addr().String()
}
-func (server *RPCServer) GetSuiteDone() chan interface{} {
+func (server *RPCServer) GetSuiteDone() chan any {
return server.handler.done
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
index a6d98793e9..ab9e11372c 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
@@ -18,7 +18,7 @@ var voidSender Void
// It handles all the business logic to avoid duplication between the two servers
type ServerHandler struct {
- done chan interface{}
+ done chan any
outputDestination io.Writer
reporter reporters.Reporter
alives []func() bool
@@ -46,7 +46,7 @@ func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHan
parallelTotal: parallelTotal,
outputDestination: os.Stdout,
- done: make(chan interface{}),
+ done: make(chan any),
}
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
index 11269cf1f2..165cbc4b67 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
@@ -236,7 +236,7 @@ func extractRunningGoroutines() ([]types.Goroutine, error) {
}
functionCall.Filename = line[:delimiterIdx]
line = strings.Split(line[delimiterIdx+1:], " ")[0]
- lineNumber, err := strconv.ParseInt(line, 10, 64)
+ lineNumber, err := strconv.ParseInt(line, 10, 32)
functionCall.Line = int(lineNumber)
if err != nil {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error()))
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go
new file mode 100644
index 0000000000..8c53fe0ada
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go
@@ -0,0 +1,10 @@
+//go:build wasm
+
+package internal
+
+import (
+ "os"
+ "syscall"
+)
+
+var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
index cc351a39bd..9c18dc8e58 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
@@ -8,7 +8,7 @@ import (
type ReportEntry = types.ReportEntry
-func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) {
+func NewReportEntry(name string, cl types.CodeLocation, args ...any) (ReportEntry, error) {
out := ReportEntry{
Visibility: types.ReportEntryVisibilityAlways,
Name: name,
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go
new file mode 100644
index 0000000000..8b7a9ceabf
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go
@@ -0,0 +1,158 @@
+package reporters
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/tools/go/packages"
+)
+
+func ptr[T any](in T) *T {
+ return &in
+}
+
+type encoder interface {
+ Encode(v any) error
+}
+
+// gojsonEvent matches the format from go internals
+// https://github.com/golang/go/blob/master/src/cmd/internal/test2json/test2json.go#L31-L41
+// https://pkg.go.dev/cmd/test2json
+type gojsonEvent struct {
+ Time *time.Time `json:",omitempty"`
+ Action GoJSONAction
+ Package string `json:",omitempty"`
+ Test string `json:",omitempty"`
+ Elapsed *float64 `json:",omitempty"`
+ Output *string `json:",omitempty"`
+ FailedBuild string `json:",omitempty"`
+}
+
+type GoJSONAction string
+
+const (
+ // start - the test binary is about to be executed
+ GoJSONStart GoJSONAction = "start"
+ // run - the test has started running
+ GoJSONRun GoJSONAction = "run"
+ // pause - the test has been paused
+ GoJSONPause GoJSONAction = "pause"
+ // cont - the test has continued running
+ GoJSONCont GoJSONAction = "cont"
+ // pass - the test passed
+ GoJSONPass GoJSONAction = "pass"
+ // bench - the benchmark printed log output but did not fail
+ GoJSONBench GoJSONAction = "bench"
+ // fail - the test or benchmark failed
+ GoJSONFail GoJSONAction = "fail"
+ // output - the test printed output
+ GoJSONOutput GoJSONAction = "output"
+ // skip - the test was skipped or the package contained no tests
+ GoJSONSkip GoJSONAction = "skip"
+)
+
+func goJSONActionFromSpecState(state types.SpecState) GoJSONAction {
+ switch state {
+ case types.SpecStateInvalid:
+ return GoJSONFail
+ case types.SpecStatePending:
+ return GoJSONSkip
+ case types.SpecStateSkipped:
+ return GoJSONSkip
+ case types.SpecStatePassed:
+ return GoJSONPass
+ case types.SpecStateFailed:
+ return GoJSONFail
+ case types.SpecStateAborted:
+ return GoJSONFail
+ case types.SpecStatePanicked:
+ return GoJSONFail
+ case types.SpecStateInterrupted:
+ return GoJSONFail
+ case types.SpecStateTimedout:
+ return GoJSONFail
+ default:
+ panic("unexpected state should not happen")
+ }
+}
+
+// gojsonReport wraps types.Report and calcualtes extra fields requires by gojson
+type gojsonReport struct {
+ o types.Report
+ // Extra calculated fields
+ goPkg string
+ elapsed float64
+}
+
+func newReport(in types.Report) *gojsonReport {
+ return &gojsonReport{
+ o: in,
+ }
+}
+
+func (r *gojsonReport) Fill() error {
+ // NOTE: could the types.Report include the go package name?
+ goPkg, err := suitePathToPkg(r.o.SuitePath)
+ if err != nil {
+ return err
+ }
+ r.goPkg = goPkg
+ r.elapsed = r.o.RunTime.Seconds()
+ return nil
+}
+
+// gojsonSpecReport wraps types.SpecReport and calculates extra fields required by gojson
+type gojsonSpecReport struct {
+ o types.SpecReport
+ // extra calculated fields
+ testName string
+ elapsed float64
+ action GoJSONAction
+}
+
+func newSpecReport(in types.SpecReport) *gojsonSpecReport {
+ return &gojsonSpecReport{
+ o: in,
+ }
+}
+
+func (sr *gojsonSpecReport) Fill() error {
+ sr.elapsed = sr.o.RunTime.Seconds()
+ sr.testName = createTestName(sr.o)
+ sr.action = goJSONActionFromSpecState(sr.o.State)
+ return nil
+}
+
+func suitePathToPkg(dir string) (string, error) {
+ cfg := &packages.Config{
+ Mode: packages.NeedFiles | packages.NeedSyntax,
+ }
+ pkgs, err := packages.Load(cfg, dir)
+ if err != nil {
+ return "", err
+ }
+ if len(pkgs) != 1 {
+ return "", errors.New("error")
+ }
+ return pkgs[0].ID, nil
+}
+
+func createTestName(spec types.SpecReport) string {
+ name := fmt.Sprintf("[%s]", spec.LeafNodeType)
+ if spec.FullText() != "" {
+ name = name + " " + spec.FullText()
+ }
+ labels := spec.Labels()
+ if len(labels) > 0 {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+ semVerConstraints := spec.SemVerConstraints()
+ if len(semVerConstraints) > 0 {
+ name = name + " [" + strings.Join(semVerConstraints, ", ") + "]"
+ }
+ name = strings.TrimSpace(name)
+ return name
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go
new file mode 100644
index 0000000000..ec5311d069
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go
@@ -0,0 +1,111 @@
+package reporters
+
+type GoJSONEventWriter struct {
+ enc encoder
+ specSystemErrFn specSystemExtractFn
+ specSystemOutFn specSystemExtractFn
+}
+
+func NewGoJSONEventWriter(enc encoder, errFn specSystemExtractFn, outFn specSystemExtractFn) *GoJSONEventWriter {
+ return &GoJSONEventWriter{
+ enc: enc,
+ specSystemErrFn: errFn,
+ specSystemOutFn: outFn,
+ }
+}
+
+func (r *GoJSONEventWriter) writeEvent(e *gojsonEvent) error {
+ return r.enc.Encode(e)
+}
+
+func (r *GoJSONEventWriter) WriteSuiteStart(report *gojsonReport) error {
+ e := &gojsonEvent{
+ Time: &report.o.StartTime,
+ Action: GoJSONStart,
+ Package: report.goPkg,
+ Output: nil,
+ FailedBuild: "",
+ }
+ return r.writeEvent(e)
+}
+
+func (r *GoJSONEventWriter) WriteSuiteResult(report *gojsonReport) error {
+ var action GoJSONAction
+ switch {
+ case report.o.PreRunStats.SpecsThatWillRun == 0:
+ action = GoJSONSkip
+ case report.o.SuiteSucceeded:
+ action = GoJSONPass
+ default:
+ action = GoJSONFail
+ }
+ e := &gojsonEvent{
+ Time: &report.o.EndTime,
+ Action: action,
+ Package: report.goPkg,
+ Output: nil,
+ FailedBuild: "",
+ Elapsed: ptr(report.elapsed),
+ }
+ return r.writeEvent(e)
+}
+
+func (r *GoJSONEventWriter) WriteSpecStart(report *gojsonReport, specReport *gojsonSpecReport) error {
+ e := &gojsonEvent{
+ Time: &specReport.o.StartTime,
+ Action: GoJSONRun,
+ Test: specReport.testName,
+ Package: report.goPkg,
+ Output: nil,
+ FailedBuild: "",
+ }
+ return r.writeEvent(e)
+}
+
+func (r *GoJSONEventWriter) WriteSpecOut(report *gojsonReport, specReport *gojsonSpecReport) error {
+ events := []*gojsonEvent{}
+
+ stdErr := r.specSystemErrFn(specReport.o)
+ if stdErr != "" {
+ events = append(events, &gojsonEvent{
+ Time: &specReport.o.EndTime,
+ Action: GoJSONOutput,
+ Test: specReport.testName,
+ Package: report.goPkg,
+ Output: ptr(stdErr),
+ FailedBuild: "",
+ })
+ }
+ stdOut := r.specSystemOutFn(specReport.o)
+ if stdOut != "" {
+ events = append(events, &gojsonEvent{
+ Time: &specReport.o.EndTime,
+ Action: GoJSONOutput,
+ Test: specReport.testName,
+ Package: report.goPkg,
+ Output: ptr(stdOut),
+ FailedBuild: "",
+ })
+ }
+
+ for _, ev := range events {
+ err := r.writeEvent(ev)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *GoJSONEventWriter) WriteSpecResult(report *gojsonReport, specReport *gojsonSpecReport) error {
+ e := &gojsonEvent{
+ Time: &specReport.o.EndTime,
+ Action: specReport.action,
+ Test: specReport.testName,
+ Package: report.goPkg,
+ Elapsed: ptr(specReport.elapsed),
+ Output: nil,
+ FailedBuild: "",
+ }
+ return r.writeEvent(e)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go
new file mode 100644
index 0000000000..633e49b88d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go
@@ -0,0 +1,45 @@
+package reporters
+
+import (
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type GoJSONReporter struct {
+ ev *GoJSONEventWriter
+}
+
+type specSystemExtractFn func (spec types.SpecReport) string
+
+func NewGoJSONReporter(enc encoder, errFn specSystemExtractFn, outFn specSystemExtractFn) *GoJSONReporter {
+ return &GoJSONReporter{
+ ev: NewGoJSONEventWriter(enc, errFn, outFn),
+ }
+}
+
+func (r *GoJSONReporter) Write(originalReport types.Report) error {
+ // suite start events
+ report := newReport(originalReport)
+ err := report.Fill()
+ if err != nil {
+ return err
+ }
+ r.ev.WriteSuiteStart(report)
+ for _, originalSpecReport := range originalReport.SpecReports {
+ specReport := newSpecReport(originalSpecReport)
+ err := specReport.Fill()
+ if err != nil {
+ return err
+ }
+ if specReport.o.LeafNodeType == types.NodeTypeIt {
+ // handle any It leaf node as a spec
+ r.ev.WriteSpecStart(report, specReport)
+ r.ev.WriteSpecOut(report, specReport)
+ r.ev.WriteSpecResult(report, specReport)
+ } else {
+ // handle any other leaf node as generic output
+ r.ev.WriteSpecOut(report, specReport)
+ }
+ }
+ r.ev.WriteSuiteResult(report)
+ return nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
index 2515b84a14..99c9c5f5be 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
@@ -2,6 +2,7 @@ package internal
import (
"context"
+ "reflect"
"github.com/onsi/ginkgo/v2/types"
)
@@ -11,13 +12,14 @@ type SpecContext interface {
SpecReport() types.SpecReport
AttachProgressReporter(func() string) func()
+ WrappedContext() context.Context
}
type specContext struct {
context.Context
*ProgressReporterManager
- cancel context.CancelFunc
+ cancel context.CancelCauseFunc
suite *Suite
}
@@ -30,7 +32,7 @@ Note that while SpecContext is used to enforce deadlines by Ginkgo it is not con
This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation.
*/
func NewSpecContext(suite *Suite) *specContext {
- ctx, cancel := context.WithCancel(context.Background())
+ ctx, cancel := context.WithCancelCause(context.Background())
sc := &specContext{
cancel: cancel,
suite: suite,
@@ -45,3 +47,28 @@ func NewSpecContext(suite *Suite) *specContext {
func (sc *specContext) SpecReport() types.SpecReport {
return sc.suite.CurrentSpecReport()
}
+
+func (sc *specContext) WrappedContext() context.Context {
+ return sc.Context
+}
+
+/*
+The user is allowed to wrap `SpecContext` in a new context.Context when using AroundNodes. But body functions expect SpecContext.
+We support this by taking their context.Context and returning a SpecContext that wraps it.
+*/
+func wrapContextChain(ctx context.Context) SpecContext {
+ if ctx == nil {
+ return nil
+ }
+ if reflect.TypeOf(ctx) == reflect.TypeOf(&specContext{}) {
+ return ctx.(*specContext)
+ } else if sc, ok := ctx.Value("GINKGO_SPEC_CONTEXT").(*specContext); ok {
+ return &specContext{
+ Context: ctx,
+ ProgressReporterManager: sc.ProgressReporterManager,
+ cancel: sc.cancel,
+ suite: sc.suite,
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
index fe6e8288ad..ef76cd099e 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
@@ -1,6 +1,7 @@
package internal
import (
+ "context"
"fmt"
"sync"
"time"
@@ -9,7 +10,6 @@ import (
"github.com/onsi/ginkgo/v2/internal/parallel_support"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
- "golang.org/x/net/context"
)
type Phase uint
@@ -20,7 +20,7 @@ const (
PhaseRun
)
-var PROGRESS_REPORTER_DEADLING = 5 * time.Second
+const ProgressReporterDeadline = 5 * time.Second
type Suite struct {
tree *TreeNode
@@ -32,6 +32,7 @@ type Suite struct {
suiteNodes Nodes
cleanupNodes Nodes
+ aroundNodes types.AroundNodes
failer *Failer
reporter reporters.Reporter
@@ -41,6 +42,8 @@ type Suite struct {
config types.SuiteConfig
deadline time.Time
+ currentConstructionNodeReport *types.ConstructionNodeReport
+
skipAll bool
report types.Report
currentSpecReport types.SpecReport
@@ -79,7 +82,7 @@ func NewSuite() *Suite {
func (suite *Suite) Clone() (*Suite, error) {
if suite.phase != PhaseBuildTopLevel {
- return nil, fmt.Errorf("cnanot clone suite after tree has been built")
+ return nil, fmt.Errorf("cannot clone suite after tree has been built")
}
return &Suite{
tree: &TreeNode{},
@@ -87,6 +90,7 @@ func (suite *Suite) Clone() (*Suite, error) {
ProgressReporterManager: NewProgressReporterManager(),
topLevelContainers: suite.topLevelContainers.Clone(),
suiteNodes: suite.suiteNodes.Clone(),
+ aroundNodes: suite.aroundNodes.Clone(),
selectiveLock: &sync.Mutex{},
}, nil
}
@@ -104,13 +108,14 @@ func (suite *Suite) BuildTree() error {
return nil
}
-func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
+func (suite *Suite) Run(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteAroundNodes types.AroundNodes, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
if suite.phase != PhaseBuildTree {
panic("cannot run before building the tree = call suite.BuildTree() first")
}
ApplyNestedFocusPolicyToTree(suite.tree)
specs := GenerateSpecsFromTreeRoot(suite.tree)
- specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)
+ specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteSemVerConstraints, suiteConfig)
+ specs = ComputeAroundNodes(specs)
suite.phase = PhaseRun
suite.client = client
@@ -120,6 +125,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string
suite.outputInterceptor = outputInterceptor
suite.interruptHandler = interruptHandler
suite.config = suiteConfig
+ suite.aroundNodes = suiteAroundNodes
if suite.config.Timeout > 0 {
suite.deadline = time.Now().Add(suite.config.Timeout)
@@ -127,7 +133,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string
cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal)
- success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)
+ success := suite.runSpecs(description, suiteLabels, suiteSemVerConstraints, suitePath, hasProgrammaticFocus, specs)
cancelProgressHandler()
@@ -199,6 +205,14 @@ func (suite *Suite) PushNode(node Node) error {
err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
}
}()
+
+ // Ensure that code running in the body of the container node
+ // has access to information about the current container node(s).
+ suite.currentConstructionNodeReport = constructionNodeReportForTreeNode(suite.tree)
+ defer func() {
+ suite.currentConstructionNodeReport = nil
+ }()
+
node.Body(nil)
return err
}()
@@ -259,6 +273,7 @@ func (suite *Suite) pushCleanupNode(node Node) error {
node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID
node.NestingLevel = suite.currentNode.NestingLevel
+ node.AroundNodes = types.AroundNodes{}.Append(suite.currentNode.AroundNodes...).Append(node.AroundNodes...)
suite.selectiveLock.Lock()
suite.cleanupNodes = append(suite.cleanupNodes, node)
suite.selectiveLock.Unlock()
@@ -327,6 +342,16 @@ func (suite *Suite) By(text string, callback ...func()) error {
return nil
}
+func (suite *Suite) CurrentConstructionNodeReport() types.ConstructionNodeReport {
+ suite.selectiveLock.Lock()
+ defer suite.selectiveLock.Unlock()
+ report := suite.currentConstructionNodeReport
+ if report == nil {
+ panic("CurrentConstructionNodeReport may only be called during construction of the spec tree")
+ }
+ return *report
+}
+
/*
Spec Running methods - used during PhaseRun
*/
@@ -370,7 +395,7 @@ func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()
- deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLING)
+ deadline, cancel := context.WithTimeout(context.Background(), ProgressReporterDeadline)
defer cancel()
var additionalReports []string
if suite.currentSpecContext != nil {
@@ -428,13 +453,14 @@ func (suite *Suite) processCurrentSpecReport() {
}
}
-func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
+func (suite *Suite) runSpecs(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
numSpecsThatWillBeRun := specs.CountWithoutSkip()
suite.report = types.Report{
SuitePath: suitePath,
SuiteDescription: description,
SuiteLabels: suiteLabels,
+ SuiteSemVerConstraints: suiteSemVerConstraints,
SuiteConfig: suite.config,
SuiteHasProgrammaticFocus: hasProgrammaticFocus,
PreRunStats: types.PreRunStats{
@@ -489,10 +515,15 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx]))
}
- if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending {
+ if suite.config.FailOnPending && specs.HasAnySpecsMarkedPending() {
suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set")
suite.report.SuiteSucceeded = false
}
+
+ if suite.config.FailOnEmpty && specs.CountWithoutSkip() == 0 {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected no specs ran and --fail-on-empty is set")
+ suite.report.SuiteSucceeded = false
+ }
}
if ranBeforeSuite {
@@ -594,8 +625,8 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
suite.writer.Truncate()
suite.outputInterceptor.StartInterceptingOutput()
report := suite.currentSpecReport
- nodes[i].Body = func(SpecContext) {
- nodes[i].ReportEachBody(report)
+ nodes[i].Body = func(ctx SpecContext) {
+ nodes[i].ReportEachBody(ctx, report)
}
state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i]))
@@ -762,7 +793,7 @@ func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
report = report.Add(aggregatedReport)
}
- node.Body = func(SpecContext) { node.ReportSuiteBody(report) }
+ node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) }
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
suite.currentSpecReport.EndTime = time.Now()
@@ -840,7 +871,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
timeoutInPlay = "node"
}
if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
- //we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
+ // we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
if node.NodeTimeout > 0 {
deadline = now.Add(node.NodeTimeout)
timeoutInPlay = "node"
@@ -858,7 +889,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
}
sc := NewSpecContext(suite)
- defer sc.cancel()
+ defer sc.cancel(fmt.Errorf("spec has finished"))
suite.selectiveLock.Lock()
suite.currentSpecContext = sc
@@ -886,7 +917,30 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
failureC <- failureFromRun
}()
- node.Body(sc)
+ aroundNodes := types.AroundNodes{}.Append(suite.aroundNodes...).Append(node.AroundNodes...)
+ if len(aroundNodes) > 0 {
+ i := 0
+ var f func(context.Context)
+ f = func(c context.Context) {
+ sc := wrapContextChain(c)
+ if sc == nil {
+ suite.failer.Fail("An AroundNode failed to pass a valid Ginkgo SpecContext in. You must always pass in a context derived from the context passed to you.", aroundNodes[i].CodeLocation)
+ return
+ }
+ i++
+ if i < len(aroundNodes) {
+ aroundNodes[i].Body(sc, f)
+ } else {
+ node.Body(sc)
+ }
+ }
+ aroundNodes[0].Body(sc, f)
+ if i != len(aroundNodes) {
+ suite.failer.Fail("An AroundNode failed to call the passed in function.", aroundNodes[i].CodeLocation)
+ }
+ } else {
+ node.Body(sc)
+ }
finished = true
}()
@@ -918,9 +972,9 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
if outcomeFromRun != types.SpecStatePassed {
additionalFailure := types.AdditionalFailure{
State: outcomeFromRun,
- Failure: failure, //we make a copy - this will include all the configuration set up above...
+ Failure: failure, // we make a copy - this will include all the configuration set up above...
}
- //...and then we update the failure with the details from failureFromRun
+ // ...and then we update the failure with the details from failureFromRun
additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
additionalFailure.Failure.ProgressReport = types.ProgressReport{}
if outcome == types.SpecStateTimedout {
@@ -958,8 +1012,8 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
// tell the spec to stop. it's important we generate the progress report first to make sure we capture where
// the spec is actually stuck
- sc.cancel()
- //and now we wait for the grace period
+ sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay))
+ // and now we wait for the grace period
gracePeriodChannel = time.After(gracePeriod)
case <-interruptStatus.Channel:
interruptStatus = suite.interruptHandler.Status()
@@ -985,7 +1039,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
}
progressReport = progressReport.WithoutOtherGoroutines()
- sc.cancel()
+ sc.cancel(fmt.Errorf(interruptStatus.Message()))
if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
if interruptStatus.ShouldIncludeProgressReport() {
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
index 73e2655656..9806e315a6 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
@@ -1,6 +1,7 @@
package testingtproxy
import (
+ "context"
"fmt"
"io"
"os"
@@ -19,9 +20,9 @@ type addReportEntryFunc func(names string, args ...any)
type ginkgoWriterInterface interface {
io.Writer
- Print(a ...interface{})
- Printf(format string, a ...interface{})
- Println(a ...interface{})
+ Print(a ...any)
+ Printf(format string, a ...any)
+ Println(a ...any)
}
type ginkgoRecoverFunc func()
type attachProgressReporterFunc func(func() string) func()
@@ -80,11 +81,31 @@ func (t *ginkgoTestingTProxy) Setenv(key, value string) {
}
}
-func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
+func (t *ginkgoTestingTProxy) Chdir(dir string) {
+ currentDir, err := os.Getwd()
+ if err != nil {
+ t.fail(fmt.Sprintf("Failed to get current directory: %v", err), 1)
+ }
+
+ t.cleanup(os.Chdir, currentDir, internal.Offset(1))
+
+ err = os.Chdir(dir)
+ if err != nil {
+ t.fail(fmt.Sprintf("Failed to change directory: %v", err), 1)
+ }
+}
+
+func (t *ginkgoTestingTProxy) Context() context.Context {
+ ctx, cancel := context.WithCancel(context.Background())
+ t.cleanup(cancel, internal.Offset(1))
+ return ctx
+}
+
+func (t *ginkgoTestingTProxy) Error(args ...any) {
t.fail(fmt.Sprintln(args...), t.offset)
}
-func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
+func (t *ginkgoTestingTProxy) Errorf(format string, args ...any) {
t.fail(fmt.Sprintf(format, args...), t.offset)
}
@@ -100,11 +121,11 @@ func (t *ginkgoTestingTProxy) Failed() bool {
return t.report().Failed()
}
-func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
+func (t *ginkgoTestingTProxy) Fatal(args ...any) {
t.fail(fmt.Sprintln(args...), t.offset)
}
-func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
+func (t *ginkgoTestingTProxy) Fatalf(format string, args ...any) {
t.fail(fmt.Sprintf(format, args...), t.offset)
}
@@ -112,11 +133,11 @@ func (t *ginkgoTestingTProxy) Helper() {
types.MarkAsHelper(1)
}
-func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
+func (t *ginkgoTestingTProxy) Log(args ...any) {
fmt.Fprintln(t.writer, args...)
}
-func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
+func (t *ginkgoTestingTProxy) Logf(format string, args ...any) {
t.Log(fmt.Sprintf(format, args...))
}
@@ -128,7 +149,7 @@ func (t *ginkgoTestingTProxy) Parallel() {
// No-op
}
-func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
+func (t *ginkgoTestingTProxy) Skip(args ...any) {
t.skip(fmt.Sprintln(args...), t.offset)
}
@@ -136,7 +157,7 @@ func (t *ginkgoTestingTProxy) SkipNow() {
t.skip("skip", t.offset)
}
-func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
+func (t *ginkgoTestingTProxy) Skipf(format string, args ...any) {
t.skip(fmt.Sprintf(format, args...), t.offset)
}
@@ -208,3 +229,9 @@ func (t *ginkgoTestingTProxy) ParallelTotal() int {
func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() {
return t.attachProgressReporter(f)
}
+func (t *ginkgoTestingTProxy) Output() io.Writer {
+ return t.writer
+}
+func (t *ginkgoTestingTProxy) Attr(key, value string) {
+ t.addReportEntry(key, value, internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
index aab42d5fb3..1c4e0534e4 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
@@ -121,15 +121,15 @@ func (w *Writer) ClearTeeWriters() {
w.teeWriters = []io.Writer{}
}
-func (w *Writer) Print(a ...interface{}) {
+func (w *Writer) Print(a ...any) {
fmt.Fprint(w, a...)
}
-func (w *Writer) Printf(format string, a ...interface{}) {
+func (w *Writer) Printf(format string, a ...any) {
fmt.Fprintf(w, format, a...)
}
-func (w *Writer) Println(a ...interface{}) {
+func (w *Writer) Println(a ...any) {
fmt.Fprintln(w, a...)
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
index 56b7be7587..026d9cf9b3 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
@@ -72,6 +72,9 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
if len(report.SuiteLabels) > 0 {
r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", ")))
}
+ if len(report.SuiteSemVerConstraints) > 0 {
+ r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteSemVerConstraints, ", ")))
+ }
r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
if report.SuiteConfig.ParallelTotal > 1 {
r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal))
@@ -87,6 +90,13 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
bannerWidth = len(labels) + 2
}
}
+ if len(report.SuiteSemVerConstraints) > 0 {
+ semVerConstraints := strings.Join(report.SuiteSemVerConstraints, ", ")
+ r.emitBlock(r.f("{{coral}}[%s]{{/}} ", semVerConstraints))
+ if len(semVerConstraints)+2 > bannerWidth {
+ bannerWidth = len(semVerConstraints) + 2
+ }
+ }
r.emitBlock(strings.Repeat("=", bannerWidth))
out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed)
@@ -182,10 +192,31 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) {
r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false)))
}
+func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) {
+ r.emitBlock("\n")
+ if r.conf.GithubOutput {
+ r.emitBlock(r.fi(1, "::group::%s", sectionName))
+ } else {
+ r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName))
+ }
+ fn()
+ if r.conf.GithubOutput {
+ r.emitBlock(r.fi(1, "::endgroup::"))
+ } else {
+ r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName))
+ }
+
+}
+
func (r *DefaultReporter) DidRun(report types.SpecReport) {
v := r.conf.Verbosity()
inParallel := report.RunningInParallel
+ // should we completely omit this spec?
+ if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips {
+ return
+ }
+
header := r.specDenoter
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
header = fmt.Sprintf("[%s]", report.LeafNodeType)
@@ -262,9 +293,12 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) {
}
}
- // If we have no content to show, jsut emit the header and return
+ // If we have no content to show, just emit the header and return
if !reportHasContent {
r.emit(r.f(highlightColor + header + "{{/}}"))
+ if r.conf.ForceNewlines {
+ r.emit("\n")
+ }
return
}
@@ -283,26 +317,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) {
//Emit Stdout/Stderr Output
if showSeparateStdSection {
- r.emitBlock("\n")
- r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}"))
- r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
- r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}"))
+ r.wrapTextBlock("Captured StdOut/StdErr Output", func() {
+ r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
+ })
}
if showSeparateVisibilityAlwaysReportsSection {
- r.emitBlock("\n")
- r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}"))
- for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
- r.emitReportEntry(1, entry)
- }
- r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}"))
+ r.wrapTextBlock("Report Entries", func() {
+ for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
+ r.emitReportEntry(1, entry)
+ }
+ })
}
if showTimeline {
- r.emitBlock("\n")
- r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}"))
- r.emitTimeline(1, report, timeline)
- r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}"))
+ r.wrapTextBlock("Timeline", func() {
+ r.emitTimeline(1, report, timeline)
+ })
}
// Emit Failure Message
@@ -350,13 +381,22 @@ func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, tim
cursor := 0
for _, entry := range timeline {
tl := entry.GetTimelineLocation()
- if tl.Offset < len(gw) {
- r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset]))
- cursor = tl.Offset
- } else if cursor < len(gw) {
+
+ end := tl.Offset
+ if end > len(gw) {
+ end = len(gw)
+ }
+ if end < cursor {
+ end = cursor
+ }
+ if cursor < end && cursor <= len(gw) && end <= len(gw) {
+ r.emit(r.fi(indent, "%s", gw[cursor:end]))
+ cursor = end
+ } else if cursor < len(gw) && end == len(gw) {
r.emit(r.fi(indent, "%s", gw[cursor:]))
cursor = len(gw)
}
+
switch x := entry.(type) {
case types.Failure:
if isVeryVerbose {
@@ -373,7 +413,7 @@ func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, tim
case types.ReportEntry:
r.emitReportEntry(indent, x)
case types.ProgressReport:
- r.emitProgressReport(indent, false, x)
+ r.emitProgressReport(indent, false, isVeryVerbose, x)
case types.SpecEvent:
if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents {
r.emitSpecEvent(indent, x, isVeryVerbose)
@@ -405,7 +445,15 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f
func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
highlightColor := r.highlightColorForState(state)
r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message))
- r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ if r.conf.GithubOutput {
+ level := "error"
+ if state.Is(types.SpecStateSkipped) {
+ level = "notice"
+ }
+ r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ } else {
+ r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ }
if failure.ForwardedPanic != "" {
r.emitBlock("\n")
r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
@@ -419,7 +467,7 @@ func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failur
if !failure.ProgressReport.IsZero() {
r.emitBlock("\n")
- r.emitProgressReport(indent, false, failure.ProgressReport)
+ r.emitProgressReport(indent, false, false, failure.ProgressReport)
}
if failure.AdditionalFailure != nil && includeAdditionalFailure {
@@ -435,11 +483,11 @@ func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) {
r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess))
}
shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)
- r.emitProgressReport(1, shouldEmitGW, report)
+ r.emitProgressReport(1, shouldEmitGW, true, report)
r.emitDelimiter(1)
}
-func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) {
+func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput, emitGroup bool, report types.ProgressReport) {
if report.Message != "" {
r.emitBlock(r.fi(indent, report.Message+"\n"))
indent += 1
@@ -475,6 +523,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
indent -= 1
}
+ if r.conf.GithubOutput && emitGroup {
+ r.emitBlock(r.fi(indent, "::group::Progress Report"))
+ }
+
if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" {
r.emit("\n")
r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
@@ -521,6 +573,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n"))
r.emitGoroutines(indent, otherGoroutines...)
}
+
+ if r.conf.GithubOutput && emitGroup {
+ r.emitBlock(r.fi(indent, "::endgroup::"))
+ }
}
func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) {
@@ -656,11 +712,11 @@ func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) {
}
/* Rendering text */
-func (r *DefaultReporter) f(format string, args ...interface{}) string {
+func (r *DefaultReporter) f(format string, args ...any) string {
return r.formatter.F(format, args...)
}
-func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string {
+func (r *DefaultReporter) fi(indentation uint, format string, args ...any) string {
return r.formatter.Fi(indentation, format, args...)
}
@@ -669,8 +725,8 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string {
}
func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string {
- texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{}
- texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...)
+ texts, locations, labels, semVerConstraints := []string{}, []types.CodeLocation{}, [][]string{}, [][]string{}
+ texts, locations, labels, semVerConstraints = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...), append(semVerConstraints, report.ContainerHierarchySemVerConstraints...)
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
@@ -678,6 +734,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
texts = append(texts, r.f(report.LeafNodeText))
}
labels = append(labels, report.LeafNodeLabels)
+ semVerConstraints = append(semVerConstraints, report.LeafNodeSemVerConstraints)
locations = append(locations, report.LeafNodeLocation)
failureLocation := report.Failure.FailureNodeLocation
@@ -691,6 +748,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...)
locations = append([]types.CodeLocation{failureLocation}, locations...)
labels = append([][]string{{}}, labels...)
+ semVerConstraints = append([][]string{{}}, semVerConstraints...)
highlightIndex = 0
case types.FailureNodeInContainer:
i := report.Failure.FailureNodeContainerIndex
@@ -718,6 +776,9 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
if len(labels[i]) > 0 {
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
}
+ if len(semVerConstraints[i]) > 0 {
+ out += r.f(" {{coral}}[%s]{{/}}", strings.Join(semVerConstraints[i], ", "))
+ }
out += "\n"
out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
}
@@ -741,6 +802,10 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
if len(flattenedLabels) > 0 {
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
}
+ flattenedSemVerConstraints := report.SemVerConstraints()
+ if len(flattenedSemVerConstraints) > 0 {
+ out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedSemVerConstraints, ", "))
+ }
out += "\n"
if usePreciseFailureLocation {
out += r.f("{{gray}}%s{{/}}", failureLocation)
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go
new file mode 100644
index 0000000000..d02fb7a1ae
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go
@@ -0,0 +1,61 @@
+package reporters
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path"
+
+ "github.com/onsi/ginkgo/v2/internal/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// GenerateGoTestJSONReport produces a JSON-formatted report in the test2json format used by `go test -json`
+func GenerateGoTestJSONReport(report types.Report, destination string) error {
+ // walk report and generate test2json-compatible objects
+ // JSON-encode the objects into filename
+ if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(destination)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ enc := json.NewEncoder(f)
+ r := reporters.NewGoJSONReporter(
+ enc,
+ systemErrForUnstructuredReporters,
+ systemOutForUnstructuredReporters,
+ )
+ return r.Write(report)
+}
+
+// MergeAndCleanupGoTestJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources
+// It skips over reports that fail to decode but reports on them via the returned messages []string
+func MergeAndCleanupGoTestJSONReports(sources []string, destination string) ([]string, error) {
+ messages := []string{}
+ if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+ return messages, err
+ }
+ f, err := os.Create(destination)
+ if err != nil {
+ return messages, err
+ }
+ defer f.Close()
+
+ for _, source := range sources {
+ data, err := os.ReadFile(source)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+ continue
+ }
+ _, err = f.Write(data)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not write to %s:\n%s", destination, err.Error()))
+ continue
+ }
+ os.Remove(source)
+ }
+ return messages, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
index be506f9b47..5d3e8db994 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
@@ -18,6 +18,7 @@ func GenerateJSONReport(report types.Report, destination string) error {
if err != nil {
return err
}
+ defer f.Close()
enc := json.NewEncoder(f)
enc.SetIndent("", " ")
err = enc.Encode([]types.Report{
@@ -26,7 +27,7 @@ func GenerateJSONReport(report types.Report, destination string) error {
if err != nil {
return err
}
- return f.Close()
+ return nil
}
// MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources
@@ -57,11 +58,12 @@ func MergeAndCleanupJSONReports(sources []string, destination string) ([]string,
if err != nil {
return messages, err
}
+ defer f.Close()
enc := json.NewEncoder(f)
enc.SetIndent("", " ")
err = enc.Encode(allReports)
if err != nil {
return messages, err
}
- return messages, f.Close()
+ return messages, nil
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
index 816042208c..828f893fb8 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
@@ -15,6 +15,7 @@ import (
"fmt"
"os"
"path"
+ "regexp"
"strings"
"github.com/onsi/ginkgo/v2/config"
@@ -35,6 +36,9 @@ type JunitReportConfig struct {
// Enable OmitSpecLabels to prevent labels from appearing in the spec name
OmitSpecLabels bool
+ // Enable OmitSpecSemVerConstraints to prevent semantic version constraints from appearing in the spec name
+ OmitSpecSemVerConstraints bool
+
// Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name
OmitLeafNodeType bool
@@ -104,6 +108,8 @@ type JUnitProperty struct {
Value string `xml:"value,attr"`
}
+var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`)
+
type JUnitTestCase struct {
// Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()"
Name string `xml:"name,attr"`
@@ -113,6 +119,8 @@ type JUnitTestCase struct {
Status string `xml:"status,attr"`
// Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime
Time float64 `xml:"time,attr"`
+ // Owner is the owner of the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes.
+ Owner string `xml:"owner,attr,omitempty"`
//Skipped is populated with a message if the test was skipped or pending
Skipped *JUnitSkipped `xml:"skipped,omitempty"`
//Error is populated if the test panicked or was interrupted
@@ -164,14 +172,17 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
{"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)},
{"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")},
{"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))},
+ {"SuiteSemVerConstraints", fmt.Sprintf("[%s]", strings.Join(report.SuiteSemVerConstraints, ","))},
{"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)},
{"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)},
{"LabelFilter", report.SuiteConfig.LabelFilter},
+ {"SemVerFilter", report.SuiteConfig.SemVerFilter},
{"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")},
{"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")},
{"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")},
{"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")},
{"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
+ {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)},
{"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
{"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
{"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
@@ -195,6 +206,16 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
if len(labels) > 0 && !config.OmitSpecLabels {
name = name + " [" + strings.Join(labels, ", ") + "]"
}
+ owner := ""
+ for _, label := range labels {
+ if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 {
+ owner = matches[1]
+ }
+ }
+ semVerConstraints := spec.SemVerConstraints()
+ if len(semVerConstraints) > 0 && !config.OmitSpecSemVerConstraints {
+ name = name + " [" + strings.Join(semVerConstraints, ", ") + "]"
+ }
name = strings.TrimSpace(name)
test := JUnitTestCase{
@@ -202,6 +223,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
Classname: report.SuiteDescription,
Status: spec.State.String(),
Time: spec.RunTime.Seconds(),
+ Owner: owner,
}
if !spec.State.Is(config.OmitTimelinesForSpecState) {
test.SystemErr = systemErrForUnstructuredReporters(spec)
@@ -312,6 +334,7 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error)
continue
}
err = xml.NewDecoder(f).Decode(&report)
+ _ = f.Close()
if err != nil {
messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
continue
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
index e990ad82e1..55e1d1f4f7 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
@@ -38,9 +38,13 @@ func GenerateTeamcityReport(report types.Report, dst string) error {
name := report.SuiteDescription
labels := report.SuiteLabels
+ semVerConstraints := report.SuiteSemVerConstraints
if len(labels) > 0 {
name = name + " [" + strings.Join(labels, ", ") + "]"
}
+ if len(semVerConstraints) > 0 {
+ name = name + " [" + strings.Join(semVerConstraints, ", ") + "]"
+ }
fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name))
for _, spec := range report.SpecReports {
name := fmt.Sprintf("[%s]", spec.LeafNodeType)
@@ -51,6 +55,10 @@ func GenerateTeamcityReport(report types.Report, dst string) error {
if len(labels) > 0 {
name = name + " [" + strings.Join(labels, ", ") + "]"
}
+ semVerConstraints := spec.SemVerConstraints()
+ if len(semVerConstraints) > 0 {
+ name = name + " [" + strings.Join(semVerConstraints, ", ") + "]"
+ }
name = tcEscape(name)
fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name)
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
index f33786a2d6..4e86dba84d 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
@@ -27,6 +27,8 @@ CurrentSpecReport returns information about the current running spec.
The returned object is a types.SpecReport which includes helper methods
to make extracting information about the spec easier.
+During construction of the test tree the result is empty.
+
You can learn more about SpecReport here: https://pkg.go.dev/github.com/onsi/ginkgo/types#SpecReport
You can learn more about CurrentSpecReport() here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
*/
@@ -34,6 +36,31 @@ func CurrentSpecReport() SpecReport {
return global.Suite.CurrentSpecReport()
}
+/*
+ConstructionNodeReport describes the container nodes during construction of
+the spec tree. It provides a subset of the information that is provided
+by SpecReport at runtime.
+
+It is documented here: [types.ConstructionNodeReport]
+*/
+type ConstructionNodeReport = types.ConstructionNodeReport
+
+/*
+CurrentTreeConstructionNodeReport returns information about the current container nodes
+that are leading to the current path in the spec tree.
+The returned object is a types.ConstructionNodeReport which includes helper methods
+to make extracting information about the spec easier.
+
+May only be called during construction of the spec tree. It panics when
+called while tests are running. Use CurrentSpecReport instead in that
+phase.
+
+You can learn more about ConstructionNodeReport here: [types.ConstructionNodeReport]
+*/
+func CurrentTreeConstructionNodeReport() ConstructionNodeReport {
+ return global.Suite.CurrentConstructionNodeReport()
+}
+
/*
ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter
@@ -60,7 +87,7 @@ AddReportEntry() must be called within a Subject or Setup node - not in a Contai
You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports
*/
-func AddReportEntry(name string, args ...interface{}) {
+func AddReportEntry(name string, args ...any) {
cl := types.NewCodeLocation(1)
reportEntry, err := internal.NewReportEntry(name, cl, args...)
if err != nil {
@@ -74,34 +101,61 @@ func AddReportEntry(name string, args ...interface{}) {
/*
ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that
-receives a SpecReport. They are called before the spec starts.
+receives a SpecReport or both SpecContext and Report for interruptible behavior. They are called before the spec starts.
+
+Example:
+
+ ReportBeforeEach(func(report SpecReport) { // process report })
+ ReportBeforeEach(func(ctx SpecContext, report SpecReport) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure.
You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
-func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool {
- combinedArgs := []interface{}{body}
+func ReportBeforeEach(body any, args ...any) bool {
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...)))
}
/*
-ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that
-receives a SpecReport. They are called after the spec has completed and receive the final report for the spec.
+ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending.
+ReportAfterEach nodes take a function that receives a SpecReport or both SpecContext and Report for interruptible behavior.
+They are called after the spec has completed and receive the final report for the spec.
+
+Example:
+
+ ReportAfterEach(func(report SpecReport) { // process report })
+ ReportAfterEach(func(ctx SpecContext, report SpecReport) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure.
You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
-func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
- combinedArgs := []interface{}{body}
+func ReportAfterEach(body any, args ...any) bool {
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...)))
}
/*
-ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report.
+ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function
+that can either receive Report or both SpecContext and Report for interruptible behavior.
+
+Example Usage:
+
+ ReportBeforeSuite(func(r Report) { // process report })
+ ReportBeforeSuite(func(ctx SpecContext, r Report) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite.
ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
@@ -112,33 +166,45 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
-func ReportBeforeSuite(body func(Report), args ...interface{}) bool {
- combinedArgs := []interface{}{body}
+func ReportBeforeSuite(body any, args ...any) bool {
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)))
}
/*
-ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report.
+ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes execute at the suite's conclusion,
+and accept a function that can either receive Report or both SpecContext and Report for interruptible behavior.
+
+Example Usage:
+
+ ReportAfterSuite("Non-interruptible ReportAfterSuite", func(r Report) { // process report })
+ ReportAfterSuite("Interruptible ReportAfterSuite", func(ctx SpecContext, r Report) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite.
-ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across
all parallel nodes
-In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags.
+In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, GoJSON, JUnit, and Teamcity formatted reports using the --json-report, --gojson-report, --junit-report, and --teamcity-report ginkgo CLI flags.
You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
-func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool {
- combinedArgs := []interface{}{body}
+func ReportAfterSuite(text string, body any, args ...any) bool {
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)))
}
func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) {
@@ -149,6 +215,12 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re
Fail(fmt.Sprintf("Failed to generate JSON report:\n%s", err.Error()))
}
}
+ if reporterConfig.GoJSONReport != "" {
+ err := reporters.GenerateGoTestJSONReport(report, reporterConfig.GoJSONReport)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate Go JSON report:\n%s", err.Error()))
+ }
+ }
if reporterConfig.JUnitReport != "" {
err := reporters.GenerateJUnitReport(report, reporterConfig.JUnitReport)
if err != nil {
@@ -167,6 +239,9 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re
if reporterConfig.JSONReport != "" {
flags = append(flags, "--json-report")
}
+ if reporterConfig.GoJSONReport != "" {
+ flags = append(flags, "--gojson-report")
+ }
if reporterConfig.JUnitReport != "" {
flags = append(flags, "--junit-report")
}
@@ -174,9 +249,11 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re
flags = append(flags, "--teamcity-report")
}
pushNode(internal.NewNode(
- deprecationTracker, types.NodeTypeReportAfterSuite,
- fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")),
- body,
- types.NewCustomCodeLocation("autogenerated by Ginkgo"),
+ internal.TransformNewNodeArgs(
+ exitIfErrors, deprecationTracker, types.NodeTypeReportAfterSuite,
+ fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")),
+ body,
+ types.NewCustomCodeLocation("autogenerated by Ginkgo"),
+ ),
))
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
index ac9b7abb5e..1031aa8554 100644
--- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
@@ -23,7 +23,7 @@ You can learn more about generating EntryDescriptions here: https://onsi.github.
*/
type EntryDescription string
-func (ed EntryDescription) render(args ...interface{}) string {
+func (ed EntryDescription) render(args ...any) string {
return fmt.Sprintf(string(ed), args...)
}
@@ -44,29 +44,29 @@ For example:
You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs
And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
*/
-func DescribeTable(description string, args ...interface{}) bool {
+func DescribeTable(description string, args ...any) bool {
GinkgoHelper()
- generateTable(description, args...)
+ generateTable(description, false, args...)
return true
}
/*
You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
*/
-func FDescribeTable(description string, args ...interface{}) bool {
+func FDescribeTable(description string, args ...any) bool {
GinkgoHelper()
args = append(args, internal.Focus)
- generateTable(description, args...)
+ generateTable(description, false, args...)
return true
}
/*
You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
*/
-func PDescribeTable(description string, args ...interface{}) bool {
+func PDescribeTable(description string, args ...any) bool {
GinkgoHelper()
args = append(args, internal.Pending)
- generateTable(description, args...)
+ generateTable(description, false, args...)
return true
}
@@ -75,13 +75,78 @@ You can mark a table as pending with `XDescribeTable`. This is equivalent to `X
*/
var XDescribeTable = PDescribeTable
+/*
+DescribeTableSubtree describes a table-driven spec that generates a set of tests for each entry.
+
+For example:
+
+ DescribeTableSubtree("a subtree table",
+ func(url string, code int, message string) {
+ var resp *http.Response
+ BeforeEach(func() {
+ var err error
+ resp, err = http.Get(url)
+ Expect(err).NotTo(HaveOccurred())
+ DeferCleanup(resp.Body.Close)
+ })
+
+ It("should return the expected status code", func() {
+ Expect(resp.StatusCode).To(Equal(code))
+ })
+
+ It("should return the expected message", func() {
+ body, err := io.ReadAll(resp.Body)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(string(body)).To(Equal(message))
+ })
+ },
+ Entry("default response", "example.com/response", http.StatusOK, "hello world"),
+ Entry("missing response", "example.com/missing", http.StatusNotFound, "wat?"),
+ )
+
+Note that you **must** define an It inside the body function.
+
+You can learn more about DescribeTableSubtree here: https://onsi.github.io/ginkgo/#table-specs
+And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
+*/
+func DescribeTableSubtree(description string, args ...any) bool {
+ GinkgoHelper()
+ generateTable(description, true, args...)
+ return true
+}
+
+/*
+You can focus a table with `FDescribeTableSubtree`. This is equivalent to `FDescribe`.
+*/
+func FDescribeTableSubtree(description string, args ...any) bool {
+ GinkgoHelper()
+ args = append(args, internal.Focus)
+ generateTable(description, true, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `PDescribeTableSubtree`. This is equivalent to `PDescribe`.
+*/
+func PDescribeTableSubtree(description string, args ...any) bool {
+ GinkgoHelper()
+ args = append(args, internal.Pending)
+ generateTable(description, true, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `XDescribeTableSubtree`. This is equivalent to `XDescribe`.
+*/
+var XDescribeTableSubtree = PDescribeTableSubtree
+
/*
TableEntry represents an entry in a table test. You generally use the `Entry` constructor.
*/
type TableEntry struct {
- description interface{}
- decorations []interface{}
- parameters []interface{}
+ description any
+ decorations []any
+ parameters []any
codeLocation types.CodeLocation
}
@@ -97,7 +162,7 @@ If you want to generate interruptible specs simply write a Table function that a
You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs
*/
-func Entry(description interface{}, args ...interface{}) TableEntry {
+func Entry(description any, args ...any) TableEntry {
GinkgoHelper()
decorations, parameters := internal.PartitionDecorations(args...)
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
@@ -106,7 +171,7 @@ func Entry(description interface{}, args ...interface{}) TableEntry {
/*
You can focus a particular entry with FEntry. This is equivalent to FIt.
*/
-func FEntry(description interface{}, args ...interface{}) TableEntry {
+func FEntry(description any, args ...any) TableEntry {
GinkgoHelper()
decorations, parameters := internal.PartitionDecorations(args...)
decorations = append(decorations, internal.Focus)
@@ -116,7 +181,7 @@ func FEntry(description interface{}, args ...interface{}) TableEntry {
/*
You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
*/
-func PEntry(description interface{}, args ...interface{}) TableEntry {
+func PEntry(description any, args ...any) TableEntry {
GinkgoHelper()
decorations, parameters := internal.PartitionDecorations(args...)
decorations = append(decorations, internal.Pending)
@@ -131,17 +196,17 @@ var XEntry = PEntry
var contextType = reflect.TypeOf(new(context.Context)).Elem()
var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
-func generateTable(description string, args ...interface{}) {
+func generateTable(description string, isSubtree bool, args ...any) {
GinkgoHelper()
cl := types.NewCodeLocation(0)
- containerNodeArgs := []interface{}{cl}
+ containerNodeArgs := []any{cl}
entries := []TableEntry{}
- var itBody interface{}
- var itBodyType reflect.Type
+ var internalBody any
+ var internalBodyType reflect.Type
- var tableLevelEntryDescription interface{}
- tableLevelEntryDescription = func(args ...interface{}) string {
+ var tableLevelEntryDescription any
+ tableLevelEntryDescription = func(args ...any) string {
out := []string{}
for _, arg := range args {
out = append(out, fmt.Sprint(arg))
@@ -166,11 +231,11 @@ func generateTable(description string, args ...interface{}) {
case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""):
tableLevelEntryDescription = arg
case t.Kind() == reflect.Func:
- if itBody != nil {
+ if internalBody != nil {
exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl))
}
- itBody = arg
- itBodyType = reflect.TypeOf(itBody)
+ internalBody = arg
+ internalBodyType = reflect.TypeOf(internalBody)
default:
containerNodeArgs = append(containerNodeArgs, arg)
}
@@ -200,46 +265,58 @@ func generateTable(description string, args ...interface{}) {
err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation)
}
- itNodeArgs := []interface{}{entry.codeLocation}
- itNodeArgs = append(itNodeArgs, entry.decorations...)
+ internalNodeArgs := []any{entry.codeLocation}
+ internalNodeArgs = append(internalNodeArgs, entry.decorations...)
hasContext := false
- if itBodyType.NumIn() > 0. {
- if itBodyType.In(0).Implements(specContextType) {
+ if internalBodyType.NumIn() > 0 {
+ if internalBodyType.In(0).Implements(specContextType) {
hasContext = true
- } else if itBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) {
+ } else if internalBodyType.In(0).Implements(contextType) {
hasContext = true
+ if len(entry.parameters) > 0 && reflect.TypeOf(entry.parameters[0]) != nil && reflect.TypeOf(entry.parameters[0]).Implements(contextType) {
+ // we allow you to pass in a non-nil context
+ hasContext = false
+ }
}
}
if err == nil {
- err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext)
+ err = validateParameters(internalBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext)
}
if hasContext {
- itNodeArgs = append(itNodeArgs, func(c SpecContext) {
+ internalNodeArgs = append(internalNodeArgs, func(c SpecContext) {
if err != nil {
panic(err)
}
- invokeFunction(itBody, append([]interface{}{c}, entry.parameters...))
+ invokeFunction(internalBody, append([]any{c}, entry.parameters...))
})
+ if isSubtree {
+ exitIfErr(types.GinkgoErrors.ContextsCannotBeUsedInSubtreeTables(cl))
+ }
} else {
- itNodeArgs = append(itNodeArgs, func() {
+ internalNodeArgs = append(internalNodeArgs, func() {
if err != nil {
panic(err)
}
- invokeFunction(itBody, entry.parameters)
+ invokeFunction(internalBody, entry.parameters)
})
}
- pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, description, itNodeArgs...))
+ internalNodeType := types.NodeTypeIt
+ if isSubtree {
+ internalNodeType = types.NodeTypeContainer
+ }
+
+ pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, internalNodeType, description, internalNodeArgs...)))
}
})
- pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...))
+ pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...)))
}
-func invokeFunction(function interface{}, parameters []interface{}) []reflect.Value {
+func invokeFunction(function any, parameters []any) []reflect.Value {
inValues := make([]reflect.Value, len(parameters))
funcType := reflect.TypeOf(function)
@@ -262,7 +339,7 @@ func invokeFunction(function interface{}, parameters []interface{}) []reflect.Va
return reflect.ValueOf(function).Call(inValues)
}
-func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation, hasContext bool) error {
+func validateParameters(function any, parameters []any, kind string, cl types.CodeLocation, hasContext bool) error {
funcType := reflect.TypeOf(function)
limit := funcType.NumIn()
offset := 0
@@ -300,7 +377,7 @@ func validateParameters(function interface{}, parameters []interface{}, kind str
return nil
}
-func computeValue(parameter interface{}, t reflect.Type) reflect.Value {
+func computeValue(parameter any, t reflect.Type) reflect.Value {
if parameter == nil {
return reflect.Zero(t)
} else {
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/around_node.go b/vendor/github.com/onsi/ginkgo/v2/types/around_node.go
new file mode 100644
index 0000000000..a069e0623d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/around_node.go
@@ -0,0 +1,56 @@
+package types
+
+import (
+ "context"
+)
+
+type AroundNodeAllowedFuncs interface {
+ ~func(context.Context, func(context.Context)) | ~func(context.Context) context.Context | ~func()
+}
+type AroundNodeFunc func(ctx context.Context, body func(ctx context.Context))
+
+func AroundNode[F AroundNodeAllowedFuncs](f F, cl CodeLocation) AroundNodeDecorator {
+ if f == nil {
+ panic("BuildAroundNode cannot be called with a nil function.")
+ }
+ var aroundNodeFunc func(context.Context, func(context.Context))
+ switch x := any(f).(type) {
+ case func(context.Context, func(context.Context)):
+ aroundNodeFunc = x
+ case func(context.Context) context.Context:
+ aroundNodeFunc = func(ctx context.Context, body func(context.Context)) {
+ ctx = x(ctx)
+ body(ctx)
+ }
+ case func():
+ aroundNodeFunc = func(ctx context.Context, body func(context.Context)) {
+ x()
+ body(ctx)
+ }
+ }
+
+ return AroundNodeDecorator{
+ Body: aroundNodeFunc,
+ CodeLocation: cl,
+ }
+}
+
+type AroundNodeDecorator struct {
+ Body AroundNodeFunc
+ CodeLocation CodeLocation
+}
+
+type AroundNodes []AroundNodeDecorator
+
+func (an AroundNodes) Clone() AroundNodes {
+ out := make(AroundNodes, len(an))
+ copy(out, an)
+ return out
+}
+
+func (an AroundNodes) Append(other ...AroundNodeDecorator) AroundNodes {
+ out := make(AroundNodes, len(an)+len(other))
+ copy(out, an)
+ copy(out[len(an):], other)
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
index 9cd5768170..57e87517e0 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
@@ -149,7 +149,7 @@ func PruneStack(fullStackTrace string, skip int) string {
re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
for i := 0; i < len(stack)/2; i++ {
// We filter out based on the source code file name.
- if !re.Match([]byte(stack[i*2+1])) {
+ if !re.MatchString(stack[i*2+1]) {
prunedStack = append(prunedStack, stack[i*2])
prunedStack = append(prunedStack, stack[i*2+1])
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go
index c88fc85a75..f847036046 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/config.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go
@@ -24,7 +24,9 @@ type SuiteConfig struct {
FocusFiles []string
SkipFiles []string
LabelFilter string
+ SemVerFilter string
FailOnPending bool
+ FailOnEmpty bool
FailFast bool
FlakeAttempts int
MustPassRepeatedly int
@@ -89,8 +91,12 @@ type ReporterConfig struct {
VeryVerbose bool
FullTrace bool
ShowNodeEvents bool
+ GithubOutput bool
+ SilenceSkips bool
+ ForceNewlines bool
JSONReport string
+ GoJSONReport string
JUnitReport string
TeamcityReport string
}
@@ -107,7 +113,7 @@ func (rc ReporterConfig) Verbosity() VerbosityLevel {
}
func (rc ReporterConfig) WillGenerateReport() bool {
- return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != ""
+ return rc.JSONReport != "" || rc.GoJSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != ""
}
func NewDefaultReporterConfig() ReporterConfig {
@@ -155,7 +161,7 @@ func (g CLIConfig) ComputedProcs() int {
n := 1
if g.Parallel {
- n = runtime.NumCPU()
+ n = runtime.GOMAXPROCS(-1)
if n > 4 {
n = n - 1
}
@@ -168,7 +174,7 @@ func (g CLIConfig) ComputedNumCompilers() int {
return g.NumCompilers
}
- return runtime.NumCPU()
+ return runtime.GOMAXPROCS(-1)
}
// Configuration for the Ginkgo CLI capturing available go flags
@@ -198,6 +204,7 @@ type GoFlagsConfig struct {
A bool
ASMFlags string
BuildMode string
+ BuildVCS bool
Compiler string
GCCGoFlags string
GCFlags string
@@ -215,6 +222,7 @@ type GoFlagsConfig struct {
ToolExec string
Work bool
X bool
+ O string
}
func NewDefaultGoFlagsConfig() GoFlagsConfig {
@@ -225,6 +233,10 @@ func (g GoFlagsConfig) BinaryMustBePreserved() bool {
return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != ""
}
+func (g GoFlagsConfig) NeedsSymbols() bool {
+ return g.BinaryMustBePreserved()
+}
+
// Configuration that were deprecated in 2.0
type deprecatedConfig struct {
DebugParallel bool
@@ -251,8 +263,12 @@ var FlagSections = GinkgoFlagSections{
{Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"},
{Key: "failure", Style: "{{red}}", Heading: "Failure Handling"},
{Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"},
- {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"},
- {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"},
+ {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis",
+ Description: "When generating a cover files, please pass a filename {{bold}}not{{/}} a path. To specify a different directory use {{magenta}}--output-dir{{/}}.",
+ },
+ {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis",
+ Description: "When generating profile files, please pass filenames {{bold}}not{{/}} a path. Ginkgo will generate a profile file with the given name in the package's directory. To specify a different directory use {{magenta}}--output-dir{{/}}.",
+ },
{Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests",
Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."},
{Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"},
@@ -264,7 +280,7 @@ var FlagSections = GinkgoFlagSections{
// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI
var SuiteConfigFlags = GinkgoFlags{
{KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo",
- Usage: "The seed used to randomize the spec suite."},
+ Usage: "The seed used to randomize the spec suite.", AlwaysExport: true},
{KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags",
Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."},
@@ -274,6 +290,8 @@ var SuiteConfigFlags = GinkgoFlags{
Usage: "If set, ginkgo will stop running a test suite after a failure occurs."},
{KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags",
Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."},
+ {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure",
+ Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."},
{KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
@@ -292,6 +310,8 @@ var SuiteConfigFlags = GinkgoFlags{
{KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression",
Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"},
+ {KeyPath: "S.SemVerFilter", Name: "sem-ver-filter", SectionKey: "filter", UsageArgument: "version",
+ Usage: "If set, ginkgo will only run specs with semantic version constraints that are satisfied by the provided version. e.g. '2.1.0'"},
{KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter",
Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."},
{KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter",
@@ -320,7 +340,7 @@ var ParallelConfigFlags = GinkgoFlags{
// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI
var ReporterConfigFlags = GinkgoFlags{
{KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags",
- Usage: "If set, suppress color output in default reporter."},
+ Usage: "If set, suppress color output in default reporter. You can also set the environment variable GINKGO_NO_COLOR=TRUE"},
{KeyPath: "R.Verbose", Name: "v", SectionKey: "output",
Usage: "If set, emits more output including GinkgoWriter contents."},
{KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output",
@@ -331,9 +351,17 @@ var ReporterConfigFlags = GinkgoFlags{
Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
{KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output",
Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"},
+ {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output",
+ Usage: "If set, default reporter prints easier to manage output in Github Actions."},
+ {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output",
+ Usage: "If set, default reporter will not print out skipped tests."},
+ {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output",
+ Usage: "If set, default reporter will ensure a newline appears after each test."},
{KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
+ {KeyPath: "R.GoJSONReport", Name: "gojson-report", UsageArgument: "filename.json", SectionKey: "output",
+ Usage: "If set, Ginkgo will generate a Go JSON-formatted test report at the specified location."},
{KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure",
Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."},
{KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output",
@@ -351,7 +379,7 @@ var ReporterConfigFlags = GinkgoFlags{
func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) {
flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...)
flags = flags.WithPrefix("ginkgo")
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"S": suiteConfig,
"R": reporterConfig,
"D": &deprecatedConfig{},
@@ -421,6 +449,13 @@ func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig Re
}
}
+ if suiteConfig.SemVerFilter != "" {
+ _, err := ParseSemVerFilter(suiteConfig.SemVerFilter)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
switch strings.ToLower(suiteConfig.OutputInterceptorMode) {
case "", "dup", "swap", "none":
default:
@@ -499,9 +534,9 @@ var GinkgoCLIWatchFlags = GinkgoFlags{
// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI
var GoBuildFlags = GinkgoFlags{
{KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis",
- Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."},
+ Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."},
{KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis",
- Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
+ Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty (by explicitly passing --vet=""), "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
{KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis",
Usage: "Enable coverage analysis. Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."},
{KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis",
@@ -515,6 +550,8 @@ var GoBuildFlags = GinkgoFlags{
Usage: "arguments to pass on each go tool asm invocation."},
{KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build",
Usage: "build mode to use. See 'go help buildmode' for more."},
+ {KeyPath: "Go.BuildVCS", Name: "buildvcs", SectionKey: "go-build",
+ Usage: "adds version control information."},
{KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build",
Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."},
{KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
@@ -551,10 +588,15 @@ var GoBuildFlags = GinkgoFlags{
Usage: "print the commands."},
}
+var GoBuildOFlags = GinkgoFlags{
+ {KeyPath: "Go.O", Name: "o", SectionKey: "go-build",
+ Usage: "output binary path (including name)."},
+}
+
// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI
var GoRunFlags = GinkgoFlags{
{KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis",
- Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`},
+ Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover. Must be passed a filename, not a path. Use output-dir to control the location of the output.`},
{KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis",
Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`},
{KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
@@ -582,6 +624,22 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo
errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails())
}
+ if strings.ContainsRune(goFlagsConfig.CoverProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--coverprofile", goFlagsConfig.CoverProfile))
+ }
+ if strings.ContainsRune(goFlagsConfig.CPUProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--cpuprofile", goFlagsConfig.CPUProfile))
+ }
+ if strings.ContainsRune(goFlagsConfig.MemProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--memprofile", goFlagsConfig.MemProfile))
+ }
+ if strings.ContainsRune(goFlagsConfig.BlockProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--blockprofile", goFlagsConfig.BlockProfile))
+ }
+ if strings.ContainsRune(goFlagsConfig.MutexProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--mutexprofile", goFlagsConfig.MutexProfile))
+ }
+
//initialize the output directory
if cliConfig.OutputDir != "" {
err := os.MkdirAll(cliConfig.OutputDir, 0777)
@@ -602,7 +660,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo
}
// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
-func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) {
+func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string, preserveSymbols bool) ([]string, error) {
// if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure
// the built test binary can generate a coverprofile
if goFlagsConfig.CoverProfile != "" {
@@ -625,10 +683,14 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string,
goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",")
}
- args := []string{"test", "-c", "-o", destination, packageToBuild}
+ if !goFlagsConfig.NeedsSymbols() && goFlagsConfig.LDFlags == "" && !preserveSymbols {
+ goFlagsConfig.LDFlags = "-w -s"
+ }
+
+ args := []string{"test", "-c", packageToBuild}
goArgs, err := GenerateFlagArgs(
- GoBuildFlags,
- map[string]interface{}{
+ GoBuildFlags.CopyAppend(GoBuildOFlags...),
+ map[string]any{
"Go": &goFlagsConfig,
},
)
@@ -647,7 +709,7 @@ func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterC
flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...)
flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...)
flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...)
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"S": &suiteConfig,
"R": &reporterConfig,
"Go": &goFlagsConfig,
@@ -659,7 +721,7 @@ func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterC
// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary
func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) {
flags := GoRunFlags.WithPrefix("test")
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"Go": &goFlagsConfig,
}
@@ -681,7 +743,7 @@ func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterCo
flags = flags.CopyAppend(GoBuildFlags...)
flags = flags.CopyAppend(GoRunFlags...)
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"S": suiteConfig,
"R": reporterConfig,
"C": cliConfig,
@@ -702,7 +764,7 @@ func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *Reporter
flags = flags.CopyAppend(GoBuildFlags...)
flags = flags.CopyAppend(GoRunFlags...)
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"S": suiteConfig,
"R": reporterConfig,
"C": cliConfig,
@@ -717,8 +779,9 @@ func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *Reporter
func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
flags := GinkgoCLISharedFlags
flags = flags.CopyAppend(GoBuildFlags...)
+ flags = flags.CopyAppend(GoBuildOFlags...)
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"C": cliConfig,
"Go": goFlagsConfig,
"D": &deprecatedConfig{},
@@ -742,7 +805,7 @@ func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig
func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) {
flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package")
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"C": cliConfig,
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
index 17922304b6..518989a844 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
@@ -113,7 +113,7 @@ type DeprecatedSpecFailure struct {
type DeprecatedSpecMeasurement struct {
Name string
- Info interface{}
+ Info any
Order int
Results []float64
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
index 4fbdc3e9b1..59313238cf 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
@@ -88,7 +88,7 @@ body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, n
}
}
-func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error {
+func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic any, cl CodeLocation) error {
return GinkgoError{
Heading: "Assertion or Panic detected during tree construction",
Message: formatter.F(
@@ -189,7 +189,7 @@ func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl
}
}
-func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error {
+func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator any) error {
return GinkgoError{
Heading: "Unknown Decorator",
Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator),
@@ -345,7 +345,7 @@ func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error {
}
/* ReportEntry errors */
-func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error {
+func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg any) error {
return GinkgoError{
Heading: "Too Many ReportEntry Values",
Message: formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. Got unexpected value: %#v`, arg),
@@ -432,6 +432,24 @@ func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error {
}
}
+func (g ginkgoErrors) InvalidSemVerConstraint(semVerConstraint, errMsg string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid SemVerConstraint",
+ Message: fmt.Sprintf("'%s' is an invalid SemVerConstraint: %s", semVerConstraint, errMsg),
+ CodeLocation: cl,
+ DocLink: "spec-semantic-version-filtering",
+ }
+}
+
+func (g ginkgoErrors) InvalidEmptySemVerConstraint(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid Empty SemVerConstraint",
+ Message: "SemVerConstraint cannot be empty",
+ CodeLocation: cl,
+ DocLink: "spec-semantic-version-filtering",
+ }
+}
+
/* Table errors */
func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error {
return GinkgoError{
@@ -505,6 +523,15 @@ func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, ac
}
}
+func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Contexts cannot be used in subtree tables",
+ Message: "You''ve defined a subtree body function that accepts a context but did not provide one in the table entry. Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.",
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
/* Parallel Synchronization errors */
func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error {
@@ -530,7 +557,7 @@ func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error {
/* Configuration errors */
-func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error {
+func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value any) error {
return GinkgoError{
Heading: "Unknown Type passed to RunSpecs",
Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value),
@@ -620,6 +647,20 @@ func (g ginkgoErrors) BothRepeatAndUntilItFails() error {
}
}
+func (g ginkgoErrors) ExpectFilenameNotPath(flag string, path string) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("%s expects a filename but was given a path: %s", flag, path),
+ Message: fmt.Sprintf("%s takes a filename, not a path. Use --output-dir to specify a directory to collect all test outputs.", flag),
+ }
+}
+
+func (g ginkgoErrors) FlagAfterPositionalParameter() error {
+ return GinkgoError{
+ Heading: "Malformed arguments - detected a flag after the package liste",
+ Message: "Make sure all flags appear {{bold}}after{{/}} the Ginkgo subcommand and {{bold}}before{{/}} your list of packages (or './...').\n{{gray}}e.g. 'ginkgo run -p my_package' is valid but `ginkgo -p run my_package` is not.\n{{gray}}e.g. 'ginkgo -p -vet=\"\" ./...' is valid but 'ginkgo -p ./... -vet=\"\"' is not{{/}}",
+ }
+}
+
/* Stack-Trace parsing errors */
func (g ginkgoErrors) FailedToParseStackTrace(message string) error {
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go
index 9186ae873d..8409653f97 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go
@@ -24,7 +24,8 @@ type GinkgoFlag struct {
DeprecatedDocLink string
DeprecatedVersion string
- ExportAs string
+ ExportAs string
+ AlwaysExport bool
}
type GinkgoFlags []GinkgoFlag
@@ -91,7 +92,7 @@ func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) {
type GinkgoFlagSet struct {
flags GinkgoFlags
- bindings interface{}
+ bindings any
sections GinkgoFlagSections
extraGoFlagsSection GinkgoFlagSection
@@ -100,7 +101,7 @@ type GinkgoFlagSet struct {
}
// Call NewGinkgoFlagSet to create GinkgoFlagSet that creates and binds to it's own *flag.FlagSet
-func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) {
+func NewGinkgoFlagSet(flags GinkgoFlags, bindings any, sections GinkgoFlagSections) (GinkgoFlagSet, error) {
return bindFlagSet(GinkgoFlagSet{
flags: flags,
bindings: bindings,
@@ -109,7 +110,7 @@ func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFl
}
// Call NewGinkgoFlagSet to create GinkgoFlagSet that extends an existing *flag.FlagSet
-func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) {
+func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings any, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) {
return bindFlagSet(GinkgoFlagSet{
flags: flags,
bindings: bindings,
@@ -334,7 +335,7 @@ func (f GinkgoFlagSet) substituteUsage() {
fmt.Fprintln(f.flagSet.Output(), f.Usage())
}
-func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) {
+func valueAtKeyPath(root any, keyPath string) (reflect.Value, bool) {
if len(keyPath) == 0 {
return reflect.Value{}, false
}
@@ -431,8 +432,8 @@ func (ssv stringSliceVar) Set(s string) error {
return nil
}
-//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
-func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) {
+// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
+func GenerateFlagArgs(flags GinkgoFlags, bindings any) ([]string, error) {
result := []string{}
for _, flag := range flags {
name := flag.ExportAs
@@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error)
iface := value.Interface()
switch value.Type() {
case reflect.TypeOf(string("")):
- if iface.(string) != "" {
+ if iface.(string) != "" || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%s", name, iface))
}
case reflect.TypeOf(int64(0)):
- if iface.(int64) != 0 {
+ if iface.(int64) != 0 || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%d", name, iface))
}
case reflect.TypeOf(float64(0)):
- if iface.(float64) != 0 {
+ if iface.(float64) != 0 || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%f", name, iface))
}
case reflect.TypeOf(int(0)):
- if iface.(int) != 0 {
+ if iface.(int) != 0 || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%d", name, iface))
}
case reflect.TypeOf(bool(true)):
@@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error)
result = append(result, fmt.Sprintf("--%s", name))
}
case reflect.TypeOf(time.Duration(0)):
- if iface.(time.Duration) != time.Duration(0) {
+ if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%s", name, iface))
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
index b0d3b651e7..40a909b6d5 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
@@ -45,6 +45,83 @@ func orAction(a, b LabelFilter) LabelFilter {
return func(labels []string) bool { return a(labels) || b(labels) }
}
+func labelSetFor(key string, labels []string) map[string]bool {
+ key = strings.ToLower(strings.TrimSpace(key))
+ out := map[string]bool{}
+ for _, label := range labels {
+ components := strings.SplitN(label, ":", 2)
+ if len(components) < 2 {
+ continue
+ }
+ if key == strings.ToLower(strings.TrimSpace(components[0])) {
+ out[strings.ToLower(strings.TrimSpace(components[1]))] = true
+ }
+ }
+
+ return out
+}
+
+func isEmptyLabelSetAction(key string) LabelFilter {
+ return func(labels []string) bool {
+ return len(labelSetFor(key, labels)) == 0
+ }
+}
+
+func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for _, value := range expectedValues {
+ if set[value] {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for _, value := range expectedValues {
+ if !set[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ if len(set) != len(expectedValues) {
+ return false
+ }
+ for _, value := range expectedValues {
+ if !set[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter {
+ expectedSet := map[string]bool{}
+ for _, value := range expectedValues {
+ expectedSet[value] = true
+ }
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for value := range set {
+ if !expectedSet[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
type lfToken uint
const (
@@ -58,6 +135,9 @@ const (
lfTokenOr
lfTokenRegexp
lfTokenLabel
+ lfTokenSetKey
+ lfTokenSetOperation
+ lfTokenSetArgument
lfTokenEOF
)
@@ -71,6 +151,8 @@ func (l lfToken) Precedence() int {
return 2
case lfTokenNot:
return 3
+ case lfTokenSetOperation:
+ return 4
}
return -1
}
@@ -93,6 +175,12 @@ func (l lfToken) String() string {
return "/regexp/"
case lfTokenLabel:
return "label"
+ case lfTokenSetKey:
+ return "set_key"
+ case lfTokenSetOperation:
+ return "set_operation"
+ case lfTokenSetArgument:
+ return "set_argument"
case lfTokenEOF:
return "EOF"
}
@@ -148,6 +236,35 @@ func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) {
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err))
}
return matchLabelRegexAction(re), nil
+ case lfTokenSetOperation:
+ tokenSetOperation := strings.ToLower(tn.value)
+ if tokenSetOperation == "isempty" {
+ return isEmptyLabelSetAction(tn.leftNode.value), nil
+ }
+ if tn.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value))
+ }
+
+ rawValues := strings.Split(tn.rightNode.value, ",")
+ values := make([]string, len(rawValues))
+ for i := range rawValues {
+ values[i] = strings.ToLower(strings.TrimSpace(rawValues[i]))
+ if strings.ContainsAny(values[i], "&|!,()/") {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i]))
+ } else if values[i] == "" {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.")
+ }
+ }
+ switch tokenSetOperation {
+ case "containsany":
+ return containsAnyLabelSetAction(tn.leftNode.value, values), nil
+ case "containsall":
+ return containsAllLabelSetAction(tn.leftNode.value, values), nil
+ case "consistsof":
+ return consistsOfLabelSetAction(tn.leftNode.value, values), nil
+ case "issubsetof":
+ return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil
+ }
}
if tn.rightNode == nil {
@@ -203,7 +320,17 @@ func (tn *treeNode) toString(indent int) string {
return out
}
+var validSetOperations = map[string]string{
+ "containsany": "containsAny",
+ "containsall": "containsAll",
+ "consistsof": "consistsOf",
+ "issubsetof": "isSubsetOf",
+ "isempty": "isEmpty",
+}
+
func tokenize(input string) func() (*treeNode, error) {
+ lastToken := lfTokenInvalid
+ lastValue := ""
runes, i := []rune(input), 0
peekIs := func(r rune) bool {
@@ -216,7 +343,7 @@ func tokenize(input string) func() (*treeNode, error) {
consumeUntil := func(cutset string) (string, int) {
j := i
for ; j < len(runes); j++ {
- if strings.IndexRune(cutset, runes[j]) >= 0 {
+ if strings.ContainsRune(cutset, runes[j]) {
break
}
}
@@ -233,6 +360,53 @@ func tokenize(input string) func() (*treeNode, error) {
}
node := &treeNode{location: i}
+ defer func() {
+ lastToken = node.token
+ lastValue = node.value
+ }()
+
+ if lastToken == lfTokenSetKey {
+ //we should get a valid set operation next
+ value, n := consumeUntil(" )")
+ if validSetOperations[strings.ToLower(value)] == "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value))
+ }
+ i += n
+ node.token, node.value = lfTokenSetOperation, value
+ return node, nil
+ }
+ if lastToken == lfTokenSetOperation {
+ //we should get an argument next, if we aren't isempty
+ var arg = ""
+ origI := i
+ if runes[i] == '{' {
+ i += 1
+ value, n := consumeUntil("}")
+ if i+n >= len(runes) {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?")
+ }
+ i += n + 1
+ arg = value
+ } else {
+ value, n := consumeUntil("&|!,()/")
+ i += n
+ arg = strings.TrimSpace(value)
+ }
+ if strings.ToLower(lastValue) == "isempty" && arg != "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg))
+ }
+ if arg == "" && strings.ToLower(lastValue) != "isempty" {
+ if i < len(runes) && runes[i] == '/' {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.")
+ } else {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue))
+ }
+ }
+ // note that we sent an empty SetArgument token if we are isempty
+ node.token, node.value = lfTokenSetArgument, arg
+ return node, nil
+ }
+
switch runes[i] {
case '&':
if !peekIs('&') {
@@ -264,8 +438,38 @@ func tokenize(input string) func() (*treeNode, error) {
i += n + 1
node.token, node.value = lfTokenRegexp, value
default:
- value, n := consumeUntil("&|!,()/")
+ value, n := consumeUntil("&|!,()/:")
i += n
+ value = strings.TrimSpace(value)
+
+ //are we the beginning of a set operation?
+ if i < len(runes) && runes[i] == ':' {
+ if peekIs(' ') {
+ if value == "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.")
+ }
+ i += 1
+ //we are the beginning of a set operation
+ node.token, node.value = lfTokenSetKey, value
+ return node, nil
+ }
+ additionalValue, n := consumeUntil("&|!,()/")
+ additionalValue = strings.TrimSpace(additionalValue)
+ if additionalValue == ":" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.")
+ }
+ i += n
+ value += additionalValue
+ }
+
+ valueToCheckForSetOperation := strings.ToLower(value)
+ for setOperation := range validSetOperations {
+ idx := strings.Index(valueToCheckForSetOperation, " "+setOperation)
+ if idx > 0 {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation]))
+ }
+ }
+
node.token, node.value = lfTokenLabel, strings.TrimSpace(value)
}
return node, nil
@@ -307,7 +511,7 @@ LOOP:
switch node.token {
case lfTokenEOF:
break LOOP
- case lfTokenLabel, lfTokenRegexp:
+ case lfTokenLabel, lfTokenRegexp, lfTokenSetKey:
if current.rightNode != nil {
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.")
}
@@ -326,6 +530,18 @@ LOOP:
node.setLeftNode(nodeToStealFrom.rightNode)
nodeToStealFrom.setRightNode(node)
current = node
+ case lfTokenSetOperation:
+ if current.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value))
+ }
+ node.setLeftNode(current.rightNode)
+ current.setRightNode(node)
+ current = node
+ case lfTokenSetArgument:
+ if current.rightNode != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token))
+ }
+ current.setRightNode(node)
case lfTokenCloseGroup:
firstUnmatchedOpenNode := current.firstUnmatchedOpenNode()
if firstUnmatchedOpenNode == nil {
@@ -354,5 +570,14 @@ func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) {
if strings.ContainsAny(out, "&|!,()/") {
return "", GinkgoErrors.InvalidLabel(label, cl)
}
+ if out[0] == ':' {
+ return "", GinkgoErrors.InvalidLabel(label, cl)
+ }
+ if strings.Contains(out, ":") {
+ components := strings.SplitN(out, ":", 2)
+ if len(components) < 2 || components[1] == "" {
+ return "", GinkgoErrors.InvalidLabel(label, cl)
+ }
+ }
return out, nil
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
index 7b1524b52e..63f7a9f6da 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
@@ -9,18 +9,18 @@ import (
// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports
// and across the network connection when running in parallel
type ReportEntryValue struct {
- raw interface{} //unexported to prevent gob from freaking out about unregistered structs
+ raw any //unexported to prevent gob from freaking out about unregistered structs
AsJSON string
Representation string
}
-func WrapEntryValue(value interface{}) ReportEntryValue {
+func WrapEntryValue(value any) ReportEntryValue {
return ReportEntryValue{
raw: value,
}
}
-func (rev ReportEntryValue) GetRawValue() interface{} {
+func (rev ReportEntryValue) GetRawValue() any {
return rev.raw
}
@@ -118,7 +118,7 @@ func (entry ReportEntry) StringRepresentation() string {
// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be
// a JSON-decoded {}interface. If you want to reconstitute your original object you can decode the entry.Value.AsJSON
// field yourself.
-func (entry ReportEntry) GetRawValue() interface{} {
+func (entry ReportEntry) GetRawValue() any {
return entry.Value.GetRawValue()
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go
new file mode 100644
index 0000000000..3fc2ed144b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go
@@ -0,0 +1,60 @@
+package types
+
+import (
+ "fmt"
+
+ "github.com/Masterminds/semver/v3"
+)
+
+type SemVerFilter func([]string) bool
+
+func MustParseSemVerFilter(input string) SemVerFilter {
+ filter, err := ParseSemVerFilter(input)
+ if err != nil {
+ panic(err)
+ }
+ return filter
+}
+
+func ParseSemVerFilter(filterVersion string) (SemVerFilter, error) {
+ if filterVersion == "" {
+ return func(_ []string) bool { return true }, nil
+ }
+
+ targetVersion, err := semver.NewVersion(filterVersion)
+ if err != nil {
+ return nil, fmt.Errorf("invalid filter version: %w", err)
+ }
+
+ return func(constraints []string) bool {
+ // unconstrained specs always run
+ if len(constraints) == 0 {
+ return true
+ }
+
+ for _, constraintStr := range constraints {
+ constraint, err := semver.NewConstraint(constraintStr)
+ if err != nil {
+ return false
+ }
+
+ if !constraint.Check(targetVersion) {
+ return false
+ }
+ }
+
+ return true
+ }, nil
+}
+
+func ValidateAndCleanupSemVerConstraint(semVerConstraint string, cl CodeLocation) (string, error) {
+ if len(semVerConstraint) == 0 {
+ return "", GinkgoErrors.InvalidEmptySemVerConstraint(cl)
+ }
+ _, err := semver.NewConstraint(semVerConstraint)
+ if err != nil {
+ return "", GinkgoErrors.InvalidSemVerConstraint(semVerConstraint, err.Error(), cl)
+ }
+
+ return semVerConstraint, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go
index aae69b04c9..9981a0dd68 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/types.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go
@@ -3,13 +3,73 @@ package types
import (
"encoding/json"
"fmt"
+ "os"
+ "slices"
"sort"
"strings"
"time"
)
const GINKGO_FOCUS_EXIT_CODE = 197
-const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999"
+
+var GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999"
+
+func init() {
+ if os.Getenv("GINKGO_TIME_FORMAT") != "" {
+ GINKGO_TIME_FORMAT = os.Getenv("GINKGO_TIME_FORMAT")
+ }
+}
+
+// ConstructionNodeReport captures information about a Ginkgo spec.
+type ConstructionNodeReport struct {
+ // ContainerHierarchyTexts is a slice containing the text strings of
+ // all Describe/Context/When containers in this spec's hierarchy.
+ ContainerHierarchyTexts []string
+
+ // ContainerHierarchyLocations is a slice containing the CodeLocations of
+ // all Describe/Context/When containers in this spec's hierarchy.
+ ContainerHierarchyLocations []CodeLocation
+
+ // ContainerHierarchyLabels is a slice containing the labels of
+ // all Describe/Context/When containers in this spec's hierarchy
+ ContainerHierarchyLabels [][]string
+
+ // ContainerHierarchySemVerConstraints is a slice containing the semVerConstraints of
+ // all Describe/Context/When containers in this spec's hierarchy
+ ContainerHierarchySemVerConstraints [][]string
+
+ // IsSerial captures whether the any container has the Serial decorator
+ IsSerial bool
+
+ // IsInOrderedContainer captures whether any container is an Ordered container
+ IsInOrderedContainer bool
+}
+
+// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText
+func (report ConstructionNodeReport) FullText() string {
+ texts := []string{}
+ texts = append(texts, report.ContainerHierarchyTexts...)
+ texts = slices.DeleteFunc(texts, func(t string) bool {
+ return t == ""
+ })
+ return strings.Join(texts, " ")
+}
+
+// Labels returns a deduped set of all the spec's Labels.
+func (report ConstructionNodeReport) Labels() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for _, labels := range report.ContainerHierarchyLabels {
+ for _, label := range labels {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+ }
+
+ return out
+}
// Report captures information about a Ginkgo test run
type Report struct {
@@ -22,6 +82,9 @@ type Report struct {
//SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function
SuiteLabels []string
+ //SuiteSemVerConstraints captures any semVerConstraints attached to the suite by the DSL's RunSpecs() function
+ SuiteSemVerConstraints []string
+
//SuiteSucceeded captures the success or failure status of the test run
//If true, the test run is considered successful.
//If false, the test run is considered unsuccessful
@@ -121,13 +184,21 @@ type SpecReport struct {
// all Describe/Context/When containers in this spec's hierarchy
ContainerHierarchyLabels [][]string
- // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text
+ // ContainerHierarchySemVerConstraints is a slice containing the semVerConstraints of
+ // all Describe/Context/When containers in this spec's hierarchy
+ ContainerHierarchySemVerConstraints [][]string
+
+ // LeafNodeType, LeafNodeLocation, LeafNodeLabels, LeafNodeSemVerConstraints and LeafNodeText capture the NodeType, CodeLocation, and text
// of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be
// one of the NodeTypesForSuiteLevelNodes node types)
- LeafNodeType NodeType
- LeafNodeLocation CodeLocation
- LeafNodeLabels []string
- LeafNodeText string
+ LeafNodeType NodeType
+ LeafNodeLocation CodeLocation
+ LeafNodeLabels []string
+ LeafNodeSemVerConstraints []string
+ LeafNodeText string
+
+ // Captures the Spec Priority
+ SpecPriority int
// State captures whether the spec has passed, failed, etc.
State SpecState
@@ -190,48 +261,52 @@ type SpecReport struct {
func (report SpecReport) MarshalJSON() ([]byte, error) {
//All this to avoid emitting an empty Failure struct in the JSON
out := struct {
- ContainerHierarchyTexts []string
- ContainerHierarchyLocations []CodeLocation
- ContainerHierarchyLabels [][]string
- LeafNodeType NodeType
- LeafNodeLocation CodeLocation
- LeafNodeLabels []string
- LeafNodeText string
- State SpecState
- StartTime time.Time
- EndTime time.Time
- RunTime time.Duration
- ParallelProcess int
- Failure *Failure `json:",omitempty"`
- NumAttempts int
- MaxFlakeAttempts int
- MaxMustPassRepeatedly int
- CapturedGinkgoWriterOutput string `json:",omitempty"`
- CapturedStdOutErr string `json:",omitempty"`
- ReportEntries ReportEntries `json:",omitempty"`
- ProgressReports []ProgressReport `json:",omitempty"`
- AdditionalFailures []AdditionalFailure `json:",omitempty"`
- SpecEvents SpecEvents `json:",omitempty"`
+ ContainerHierarchyTexts []string
+ ContainerHierarchyLocations []CodeLocation
+ ContainerHierarchyLabels [][]string
+ ContainerHierarchySemVerConstraints [][]string
+ LeafNodeType NodeType
+ LeafNodeLocation CodeLocation
+ LeafNodeLabels []string
+ LeafNodeSemVerConstraints []string
+ LeafNodeText string
+ State SpecState
+ StartTime time.Time
+ EndTime time.Time
+ RunTime time.Duration
+ ParallelProcess int
+ Failure *Failure `json:",omitempty"`
+ NumAttempts int
+ MaxFlakeAttempts int
+ MaxMustPassRepeatedly int
+ CapturedGinkgoWriterOutput string `json:",omitempty"`
+ CapturedStdOutErr string `json:",omitempty"`
+ ReportEntries ReportEntries `json:",omitempty"`
+ ProgressReports []ProgressReport `json:",omitempty"`
+ AdditionalFailures []AdditionalFailure `json:",omitempty"`
+ SpecEvents SpecEvents `json:",omitempty"`
}{
- ContainerHierarchyTexts: report.ContainerHierarchyTexts,
- ContainerHierarchyLocations: report.ContainerHierarchyLocations,
- ContainerHierarchyLabels: report.ContainerHierarchyLabels,
- LeafNodeType: report.LeafNodeType,
- LeafNodeLocation: report.LeafNodeLocation,
- LeafNodeLabels: report.LeafNodeLabels,
- LeafNodeText: report.LeafNodeText,
- State: report.State,
- StartTime: report.StartTime,
- EndTime: report.EndTime,
- RunTime: report.RunTime,
- ParallelProcess: report.ParallelProcess,
- Failure: nil,
- ReportEntries: nil,
- NumAttempts: report.NumAttempts,
- MaxFlakeAttempts: report.MaxFlakeAttempts,
- MaxMustPassRepeatedly: report.MaxMustPassRepeatedly,
- CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput,
- CapturedStdOutErr: report.CapturedStdOutErr,
+ ContainerHierarchyTexts: report.ContainerHierarchyTexts,
+ ContainerHierarchyLocations: report.ContainerHierarchyLocations,
+ ContainerHierarchyLabels: report.ContainerHierarchyLabels,
+ ContainerHierarchySemVerConstraints: report.ContainerHierarchySemVerConstraints,
+ LeafNodeType: report.LeafNodeType,
+ LeafNodeLocation: report.LeafNodeLocation,
+ LeafNodeLabels: report.LeafNodeLabels,
+ LeafNodeSemVerConstraints: report.LeafNodeSemVerConstraints,
+ LeafNodeText: report.LeafNodeText,
+ State: report.State,
+ StartTime: report.StartTime,
+ EndTime: report.EndTime,
+ RunTime: report.RunTime,
+ ParallelProcess: report.ParallelProcess,
+ Failure: nil,
+ ReportEntries: nil,
+ NumAttempts: report.NumAttempts,
+ MaxFlakeAttempts: report.MaxFlakeAttempts,
+ MaxMustPassRepeatedly: report.MaxMustPassRepeatedly,
+ CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput,
+ CapturedStdOutErr: report.CapturedStdOutErr,
}
if !report.Failure.IsZero() {
@@ -279,6 +354,9 @@ func (report SpecReport) FullText() string {
if report.LeafNodeText != "" {
texts = append(texts, report.LeafNodeText)
}
+ texts = slices.DeleteFunc(texts, func(t string) bool {
+ return t == ""
+ })
return strings.Join(texts, " ")
}
@@ -304,6 +382,28 @@ func (report SpecReport) Labels() []string {
return out
}
+// SemVerConstraints returns a deduped set of all the spec's SemVerConstraints.
+func (report SpecReport) SemVerConstraints() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for _, semVerConstraints := range report.ContainerHierarchySemVerConstraints {
+ for _, semVerConstraint := range semVerConstraints {
+ if !seen[semVerConstraint] {
+ seen[semVerConstraint] = true
+ out = append(out, semVerConstraint)
+ }
+ }
+ }
+ for _, semVerConstraint := range report.LeafNodeSemVerConstraints {
+ if !seen[semVerConstraint] {
+ seen[semVerConstraint] = true
+ out = append(out, semVerConstraint)
+ }
+ }
+
+ return out
+}
+
// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query
func (report SpecReport) MatchesLabelFilter(query string) (bool, error) {
filter, err := ParseLabelFilter(query)
@@ -313,6 +413,15 @@ func (report SpecReport) MatchesLabelFilter(query string) (bool, error) {
return filter(report.Labels()), nil
}
+// MatchesSemVerFilter returns true if the spec satisfies the passed in label filter query
+func (report SpecReport) MatchesSemVerFilter(version string) (bool, error) {
+ filter, err := ParseSemVerFilter(version)
+ if err != nil {
+ return false, err
+ }
+ return filter(report.SemVerConstraints()), nil
+}
+
// FileName() returns the name of the file containing the spec
func (report SpecReport) FileName() string {
return report.LeafNodeLocation.FileName
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go
index a37f308286..b9c1ea9856 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/version.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go
@@ -1,3 +1,3 @@
package types
-const VERSION = "2.13.0"
+const VERSION = "2.27.2"
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index 4fc45f29c0..b7d7309f3f 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,192 @@
+## 1.38.2
+
+- roll back to go 1.23.0 [c404969]
+
+## 1.38.1
+
+### Fixes
+
+Numerous minor fixes and dependency bumps
+
+## 1.38.0
+
+### Features
+- gstruct handles extra unexported fields [4ee7ed0]
+
+### Fixes
+- support [] in IgnoringTopFunction function signatures (#851) [36bbf72]
+
+### Maintenance
+- Bump golang.org/x/net from 0.40.0 to 0.41.0 (#846) [529d408]
+- Fix typo [acd1f55]
+- Bump google.golang.org/protobuf from 1.36.5 to 1.36.6 (#835) [bae65a0]
+- Bump nokogiri from 1.18.4 to 1.18.8 in /docs (#842) [8dda91f]
+- Bump golang.org/x/net from 0.39.0 to 0.40.0 (#843) [212d812]
+- Bump github.com/onsi/ginkgo/v2 from 2.23.3 to 2.23.4 (#839) [59bd7f9]
+- Bump nokogiri from 1.18.1 to 1.18.4 in /docs (#834) [328c729]
+- Bump uri from 1.0.2 to 1.0.3 in /docs (#826) [9a798a1]
+- Bump golang.org/x/net from 0.37.0 to 0.39.0 (#841) [04a72c6]
+
+## 1.37.0
+
+### Features
+- add To/ToNot/NotTo aliases for AsyncAssertion [5666f98]
+
+## 1.36.3
+
+### Maintenance
+
+- bump all the things [adb8b49]
+- chore: replace `interface{}` with `any` [7613216]
+- Bump google.golang.org/protobuf from 1.36.1 to 1.36.5 (#822) [9fe5259]
+- remove spurious "toolchain" from go.mod (#819) [a0e85b9]
+- Bump golang.org/x/net from 0.33.0 to 0.35.0 (#823) [604a8b1]
+- Bump activesupport from 6.0.6.1 to 6.1.7.5 in /docs (#772) [36fbc84]
+- Bump github-pages from 231 to 232 in /docs (#778) [ced70d7]
+- Bump rexml from 3.2.6 to 3.3.9 in /docs (#788) [c8b4a07]
+- Bump github.com/onsi/ginkgo/v2 from 2.22.1 to 2.22.2 (#812) [06431b9]
+- Bump webrick from 1.8.1 to 1.9.1 in /docs (#800) [b55a92d]
+- Fix typos (#813) [a1d518b]
+
+## 1.36.2
+
+### Maintenance
+- Bump google.golang.org/protobuf from 1.35.1 to 1.36.1 (#810) [9a7609d]
+- Bump golang.org/x/net from 0.30.0 to 0.33.0 (#807) [b6cb028]
+- Bump github.com/onsi/ginkgo/v2 from 2.20.1 to 2.22.1 (#808) [5756529]
+- Bump nokogiri from 1.16.3 to 1.16.5 in /docs (#757) [dabc12e]
+
+## 1.36.1
+
+### Fixes
+- Fix https://github.com/onsi/gomega/issues/803 [1c6c112]
+- resolves onsi/gomega#696: make HaveField great on pointer receivers given only a non-addressable value [4feb9d7]
+
+## 1.36.0
+
+### Features
+- new: make collection-related matchers Go 1.23 iterator aware [4c964c6]
+
+### Maintenance
+- Replace min/max helpers with built-in min/max [ece6872]
+- Fix some typos in docs [8e924d7]
+
+## 1.35.1
+
+### Fixes
+- Export EnforceDefaultTimeoutsWhenUsingContexts and DisableDefaultTimeoutsWhenUsingContext [ca36da1]
+
+## 1.35.0
+
+### Features
+
+- You can now call `EnforceDefaultTimeoutsWhenUsingContexts()` to have `Eventually` honor the default timeout when passed a context. (prior to this you had to expclility add a timeout) [e4c4265]
+- You can call `StopTrying(message).Successfully()` to abort a `Consistently` early without failure [eeca931]
+
+### Fixes
+
+- Stop memoizing the result of `HaveField` to avoid unexpected errors when used with async assertions. [3bdbc4e]
+
+### Maintenance
+
+- Bump all dependencies [a05a416]
+
+## 1.34.2
+
+Require Go 1.22+
+
+### Maintenance
+- bump ginkgo as well [c59c6dc]
+- bump to go 1.22 - remove x/exp dependency [8158b99]
+
+## 1.34.1
+
+### Maintenance
+- Use slices from exp/slices to keep golang 1.20 compat [5e71dcd]
+
+## 1.34.0
+
+### Features
+- Add RoundTripper method to ghttp.Server [c549e0d]
+
+### Fixes
+- fix incorrect handling of nil slices in HaveExactElements (fixes #771) [878940c]
+- issue_765 - fixed bug in Hopcroft-Karp algorithm [ebadb67]
+
+### Maintenance
+- bump ginkgo [8af2ece]
+- Fix typo in docs [123a071]
+- Bump github.com/onsi/ginkgo/v2 from 2.17.2 to 2.17.3 (#756) [0e69083]
+- Bump google.golang.org/protobuf from 1.33.0 to 1.34.1 (#755) [2675796]
+- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#754) [4160c0f]
+- Bump github-pages from 230 to 231 in /docs (#748) [892c303]
+
+## 1.33.1
+
+### Fixes
+- fix confusing eventually docs [3a66379]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.17.1 to 2.17.2 [e9bc35a]
+
+## 1.33.0
+
+### Features
+
+`Receive` not accepts `Receive(, MATCHER>)`, allowing you to pick out a specific value on the channel that satisfies the provided matcher and is stored in the provided pointer.
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.15.0 to 2.17.1 (#745) [9999deb]
+- Bump github-pages from 229 to 230 in /docs (#735) [cb5ff21]
+- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#746) [bac6596]
+
+## 1.32.0
+
+### Maintenance
+- Migrate github.com/golang/protobuf to google.golang.org/protobuf [436a197]
+
+ This release drops the deprecated github.com/golang/protobuf and adopts google.golang.org/protobuf. Care was taken to ensure the release is backwards compatible (thanks @jbduncan !). Please open an issue if you run into one.
+
+- chore: test with Go 1.22 (#733) [32ef35e]
+- Bump golang.org/x/net from 0.19.0 to 0.20.0 (#717) [a0d0387]
+- Bump github-pages and jekyll-feed in /docs (#732) [b71e477]
+- docs: fix typo and broken anchor link to gstruct [f460154]
+- docs: fix HaveEach matcher signature [a2862e4]
+
+## 1.31.1
+
+### Fixes
+- Inverted arguments order of FailureMessage of BeComparableToMatcher [e0dd999]
+- Update test in case keeping msg is desired [ad1a367]
+
+### Maintenance
+- Show how to import the format sub package [24e958d]
+- tidy up go.sum [26661b8]
+- bump dependencies [bde8f7a]
+
+## 1.31.0
+
+### Features
+- Async assertions include context cancellation cause if present [121c37f]
+
+### Maintenance
+- Bump minimum go version [dee1e3c]
+- docs: fix typo in example usage "occured" -> "occurred" [49005fe]
+- Bump actions/setup-go from 4 to 5 (#714) [f1c8757]
+- Bump github/codeql-action from 2 to 3 (#715) [9836e76]
+- Bump github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.2 (#713) [54726f0]
+- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#711) [df97ecc]
+- docs: fix `HaveExactElement` typo (#712) [a672c86]
+
+## 1.30.0
+
+### Features
+- BeTrueBecause and BeFalseBecause allow for better failure messages [4da4c7f]
+
+### Maintenance
+- Bump actions/checkout from 3 to 4 (#694) [6ca6e97]
+- doc: fix type on gleak go doc [f1b8343]
+
## 1.29.0
### Features
@@ -183,7 +372,7 @@
### Features
-Introducting [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers.
+Introducing [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers.
This is an RC release for `gcustom`. The external API may be tweaked in response to feedback however it is expected to remain mostly stable.
@@ -322,7 +511,7 @@ These improvements are all documented in [Gomega's docs](https://onsi.github.io/
- Fix max number of samples in experiments on non-64-bit systems. (#528) [1c84497]
- Remove dependency on ginkgo v1.16.4 (#530) [4dea8d5]
- Fix for Go 1.18 (#532) [56d2a29]
-- Document precendence of timeouts (#533) [b607941]
+- Document precedence of timeouts (#533) [b607941]
## 1.18.1
@@ -339,7 +528,7 @@ These improvements are all documented in [Gomega's docs](https://onsi.github.io/
## Fixes
- Gomega now uses ioutil for Go 1.15 and lower (#492) - official support is only for the most recent two major versions of Go but this will unblock users who need to stay on older unsupported versions of Go. [c29c1c0]
-## Maintenace
+## Maintenance
- Remove Travis workflow (#491) [72e6040]
- Upgrade to Ginkgo 2.0.0 GA [f383637]
- chore: fix description of HaveField matcher (#487) [2b4b2c0]
@@ -587,7 +776,7 @@ Improvements:
- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout.
- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. This is always a mistake that can hide failing tests.
-- `Receive()` no longer errors when passed a closed channel, it's perfectly fine to attempt to read from a closed channel so Ω(c).Should(Receive()) always fails and Ω(c).ShoudlNot(Receive()) always passes with a closed channel.
+- `Receive()` no longer errors when passed a closed channel, it's perfectly fine to attempt to read from a closed channel so Ω(c).Should(Receive()) always fails and Ω(c).ShouldNot(Receive()) always passes with a closed channel.
- Added `HavePrefix` and `HaveSuffix` matchers.
- `ghttp` can now handle concurrent requests.
- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`.
@@ -597,7 +786,7 @@ Improvements:
- `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives.
- Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher
- Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers
-- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that timeout if the underlying Closer/Reader/Writer does not return within the alloted time.
+- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that timeout if the underlying Closer/Reader/Writer does not return within the allotted time.
- Added `gbytes.BufferReader` - this constructs a `gbytes.Buffer` that asynchronously reads the passed-in `io.Reader` into its buffer.
Bug Fixes:
@@ -642,7 +831,7 @@ New Matchers:
Updated Matchers:
-- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an objet that satisfies the passed-in matcher.
+- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an object that satisfies the passed-in matcher.
- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed.
Misc:
diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go
index 6c1680638b..96f04b2104 100644
--- a/vendor/github.com/onsi/gomega/format/format.go
+++ b/vendor/github.com/onsi/gomega/format/format.go
@@ -57,7 +57,7 @@ var Indent = " "
var longFormThreshold = 20
-// GomegaStringer allows for custom formating of objects for gomega.
+// GomegaStringer allows for custom formatting of objects for gomega.
type GomegaStringer interface {
// GomegaString will be used to custom format an object.
// It does not follow UseStringerRepresentation value and will always be called regardless.
@@ -73,7 +73,7 @@ If the CustomFormatter does not want to handle the object it should return ("",
Strings returned by CustomFormatters are not truncated
*/
-type CustomFormatter func(value interface{}) (string, bool)
+type CustomFormatter func(value any) (string, bool)
type CustomFormatterKey uint
var customFormatterKey CustomFormatterKey = 1
@@ -125,7 +125,7 @@ If expected is omitted, then the message looks like:
*/
-func Message(actual interface{}, message string, expected ...interface{}) string {
+func Message(actual any, message string, expected ...any) string {
if len(expected) == 0 {
return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message)
}
@@ -255,7 +255,7 @@ recursing into the object.
Set PrintContextObjects to true to print the content of objects implementing context.Context
*/
-func Object(object interface{}, indentation uint) string {
+func Object(object any, indentation uint) string {
indent := strings.Repeat(Indent, int(indentation))
value := reflect.ValueOf(object)
commonRepresentation := ""
@@ -392,7 +392,7 @@ func formatValue(value reflect.Value, indentation uint) string {
}
}
-func formatString(object interface{}, indentation uint) string {
+func formatString(object any, indentation uint) string {
if indentation == 1 {
s := fmt.Sprintf("%s", object)
components := strings.Split(s, "\n")
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index ba082146a7..fdba34ee9d 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -22,7 +22,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.29.0"
+const GOMEGA_VERSION = "1.38.2"
const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
@@ -178,7 +178,7 @@ func ensureDefaultGomegaIsConfigured() {
// All subsequent arguments will be required to be nil/zero.
//
// This is convenient if you want to make an assertion on a method/function that returns
-// a value and an error - a common patter in Go.
+// a value and an error - a common pattern in Go.
//
// For example, given a function with signature:
//
@@ -191,7 +191,7 @@ func ensureDefaultGomegaIsConfigured() {
// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
//
// Ω and Expect are identical
-func Ω(actual interface{}, extra ...interface{}) Assertion {
+func Ω(actual any, extra ...any) Assertion {
ensureDefaultGomegaIsConfigured()
return Default.Ω(actual, extra...)
}
@@ -217,7 +217,7 @@ func Ω(actual interface{}, extra ...interface{}) Assertion {
// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
//
// Expect and Ω are identical
-func Expect(actual interface{}, extra ...interface{}) Assertion {
+func Expect(actual any, extra ...any) Assertion {
ensureDefaultGomegaIsConfigured()
return Default.Expect(actual, extra...)
}
@@ -233,7 +233,7 @@ func Expect(actual interface{}, extra ...interface{}) Assertion {
// This is most useful in helper functions that make assertions. If you want Gomega's
// error message to refer to the calling line in the test (as opposed to the line in the helper function)
// set the first argument of `ExpectWithOffset` appropriately.
-func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion {
+func ExpectWithOffset(offset int, actual any, extra ...any) Assertion {
ensureDefaultGomegaIsConfigured()
return Default.ExpectWithOffset(offset, actual, extra...)
}
@@ -319,7 +319,19 @@ you an also use Eventually().WithContext(ctx) to pass in the context. Passed-in
Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17))
}, SpecTimeout(time.Second))
-Either way the context passd to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
+Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
+
+By default, when a context is passed to Eventually *without* an explicit timeout, Gomega will rely solely on the context's cancellation to determine when to stop polling. If you want to specify a timeout in addition to the context you can do so using the .WithTimeout() method. For example:
+
+ Eventually(client.FetchCount).WithContext(ctx).WithTimeout(10*time.Second).Should(BeNumerically(">=", 17))
+
+now either the context cancellation or the timeout will cause Eventually to stop polling.
+
+If, instead, you would like to opt out of this behavior and have Gomega's default timeouts govern Eventuallys that take a context you can call:
+
+ EnforceDefaultTimeoutsWhenUsingContexts()
+
+in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if either the context is cancelled or the default timeout elapses.
**Category 3: Making assertions _in_ the function passed into Eventually**
@@ -372,13 +384,13 @@ You can ensure that you get a number of consecutive successful tries before succ
Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods:
- Eventually(..., "1s", "2s", ctx).Should(...)
+ Eventually(..., "10s", "2s", ctx).Should(...)
is equivalent to
- Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...)
+ Eventually(...).WithTimeout(10*time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...)
*/
-func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+func Eventually(actualOrCtx any, args ...any) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.Eventually(actualOrCtx, args...)
}
@@ -392,7 +404,7 @@ func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
// `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are
// the same as `Eventually(...).WithOffset(...).WithTimeout` or
// `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`.
-func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+func EventuallyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.EventuallyWithOffset(offset, actualOrCtx, args...)
}
@@ -412,7 +424,7 @@ Consistently is useful in cases where you want to assert that something *does no
This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received.
*/
-func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+func Consistently(actualOrCtx any, args ...any) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.Consistently(actualOrCtx, args...)
}
@@ -423,13 +435,13 @@ func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
//
// `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and
// optional `WithTimeout` and `WithPolling`.
-func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+func ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...)
}
/*
-StopTrying can be used to signal to Eventually and Consistentlythat they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal.
+StopTrying can be used to signal to Eventually and Consistently that they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal.
You can send the StopTrying signal by either returning StopTrying("message") as an error from your passed-in function _or_ by calling StopTrying("message").Now() to trigger a panic and end execution.
@@ -491,6 +503,16 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) {
Default.SetDefaultConsistentlyPollingInterval(t)
}
+// EnforceDefaultTimeoutsWhenUsingContexts forces `Eventually` to apply a default timeout even when a context is provided.
+func EnforceDefaultTimeoutsWhenUsingContexts() {
+ Default.EnforceDefaultTimeoutsWhenUsingContexts()
+}
+
+// DisableDefaultTimeoutsWhenUsingContext disables the default timeout when a context is provided to `Eventually`.
+func DisableDefaultTimeoutsWhenUsingContext() {
+ Default.DisableDefaultTimeoutsWhenUsingContext()
+}
+
// AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
// the matcher passed to the Should and ShouldNot methods.
//
diff --git a/vendor/github.com/onsi/gomega/internal/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion.go
index 08356a610b..cc846e7ce7 100644
--- a/vendor/github.com/onsi/gomega/internal/assertion.go
+++ b/vendor/github.com/onsi/gomega/internal/assertion.go
@@ -9,19 +9,19 @@ import (
)
type Assertion struct {
- actuals []interface{} // actual value plus all extra values
- actualIndex int // value to pass to the matcher
- vet vetinari // the vet to call before calling Gomega matcher
+ actuals []any // actual value plus all extra values
+ actualIndex int // value to pass to the matcher
+ vet vetinari // the vet to call before calling Gomega matcher
offset int
g *Gomega
}
// ...obligatory discworld reference, as "vetineer" doesn't sound ... quite right.
-type vetinari func(assertion *Assertion, optionalDescription ...interface{}) bool
+type vetinari func(assertion *Assertion, optionalDescription ...any) bool
-func NewAssertion(actualInput interface{}, g *Gomega, offset int, extra ...interface{}) *Assertion {
+func NewAssertion(actualInput any, g *Gomega, offset int, extra ...any) *Assertion {
return &Assertion{
- actuals: append([]interface{}{actualInput}, extra...),
+ actuals: append([]any{actualInput}, extra...),
actualIndex: 0,
vet: (*Assertion).vetActuals,
offset: offset,
@@ -44,37 +44,37 @@ func (assertion *Assertion) Error() types.Assertion {
}
}
-func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}
-func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
-func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}
-func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
-func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
-func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
+func (assertion *Assertion) buildDescription(optionalDescription ...any) string {
switch len(optionalDescription) {
case 0:
return ""
@@ -86,7 +86,7 @@ func (assertion *Assertion) buildDescription(optionalDescription ...interface{})
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
}
-func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...any) bool {
actualInput := assertion.actuals[assertion.actualIndex]
matches, err := matcher.Match(actualInput)
assertion.g.THelper()
@@ -113,7 +113,7 @@ func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool
// vetActuals vets the actual values, with the (optional) exception of a
// specific value, such as the first value in case non-error assertions, or the
// last value in case of Error()-based assertions.
-func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool {
+func (assertion *Assertion) vetActuals(optionalDescription ...any) bool {
success, message := vetActuals(assertion.actuals, assertion.actualIndex)
if success {
return true
@@ -129,7 +129,7 @@ func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool
// the final error value is non-zero. Otherwise, it doesn't vet the actual
// values, as these are allowed to take on any values unless there is a non-zero
// error value.
-func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool {
+func (assertion *Assertion) vetError(optionalDescription ...any) bool {
if err := assertion.actuals[assertion.actualIndex]; err != nil {
// Go error result idiom: all other actual values must be zero values.
return assertion.vetActuals(optionalDescription...)
@@ -139,7 +139,7 @@ func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool {
// vetActuals vets a slice of actual values, optionally skipping a particular
// value slice element, such as the first or last value slice element.
-func vetActuals(actuals []interface{}, skipIndex int) (bool, string) {
+func vetActuals(actuals []any, skipIndex int) (bool, string) {
for i, actual := range actuals {
if i == skipIndex {
continue
diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go
index 1188b0bce3..4121505b62 100644
--- a/vendor/github.com/onsi/gomega/internal/async_assertion.go
+++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go
@@ -69,8 +69,8 @@ type AsyncAssertion struct {
asyncType AsyncAssertionType
actualIsFunc bool
- actual interface{}
- argsToForward []interface{}
+ actual any
+ argsToForward []any
timeoutInterval time.Duration
pollingInterval time.Duration
@@ -80,7 +80,7 @@ type AsyncAssertion struct {
g *Gomega
}
-func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion {
+func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput any, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion {
out := &AsyncAssertion{
asyncType: asyncType,
timeoutInterval: timeoutInterval,
@@ -129,7 +129,7 @@ func (assertion *AsyncAssertion) WithContext(ctx context.Context) types.AsyncAss
return assertion
}
-func (assertion *AsyncAssertion) WithArguments(argsToForward ...interface{}) types.AsyncAssertion {
+func (assertion *AsyncAssertion) WithArguments(argsToForward ...any) types.AsyncAssertion {
assertion.argsToForward = argsToForward
return assertion
}
@@ -139,19 +139,31 @@ func (assertion *AsyncAssertion) MustPassRepeatedly(count int) types.AsyncAssert
return assertion
}
-func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Asynchronous assertion", optionalDescription...)
return assertion.match(matcher, true, optionalDescription...)
}
-func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *AsyncAssertion) To(matcher types.GomegaMatcher, optionalDescription ...any) bool {
+ return assertion.Should(matcher, optionalDescription...)
+}
+
+func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Asynchronous assertion", optionalDescription...)
return assertion.match(matcher, false, optionalDescription...)
}
-func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string {
+func (assertion *AsyncAssertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...any) bool {
+ return assertion.ShouldNot(matcher, optionalDescription...)
+}
+
+func (assertion *AsyncAssertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...any) bool {
+ return assertion.ShouldNot(matcher, optionalDescription...)
+}
+
+func (assertion *AsyncAssertion) buildDescription(optionalDescription ...any) string {
switch len(optionalDescription) {
case 0:
return ""
@@ -163,7 +175,7 @@ func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interfa
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
}
-func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (interface{}, error) {
+func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (any, error) {
if len(values) == 0 {
return nil, &asyncPolledActualError{
message: fmt.Sprintf("The function passed to %s did not return any values", assertion.asyncType),
@@ -224,7 +236,7 @@ func (assertion *AsyncAssertion) argumentMismatchError(t reflect.Type, numProvid
if numProvided == 1 {
have = "has"
}
- return fmt.Errorf(`The function passed to %s has signature %s takes %d arguments but %d %s been provided. Please use %s().WithArguments() to pass the corect set of arguments.
+ return fmt.Errorf(`The function passed to %s has signature %s takes %d arguments but %d %s been provided. Please use %s().WithArguments() to pass the correct set of arguments.
You can learn more at https://onsi.github.io/gomega/#eventually
`, assertion.asyncType, t, t.NumIn(), numProvided, have, assertion.asyncType)
@@ -237,9 +249,9 @@ You can learn more at https://onsi.github.io/gomega/#eventually
`, assertion.asyncType, reason)
}
-func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error), error) {
+func (assertion *AsyncAssertion) buildActualPoller() (func() (any, error), error) {
if !assertion.actualIsFunc {
- return func() (interface{}, error) { return assertion.actual, nil }, nil
+ return func() (any, error) { return assertion.actual, nil }, nil
}
actualValue := reflect.ValueOf(assertion.actual)
actualType := reflect.TypeOf(assertion.actual)
@@ -301,7 +313,7 @@ func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error
return nil, assertion.invalidMustPassRepeatedlyError("parameter can't be < 1")
}
- return func() (actual interface{}, err error) {
+ return func() (actual any, err error) {
var values []reflect.Value
assertionFailure = nil
defer func() {
@@ -335,7 +347,7 @@ func (assertion *AsyncAssertion) afterTimeout() <-chan time.Time {
if assertion.asyncType == AsyncAssertionTypeConsistently {
return time.After(assertion.g.DurationBundle.ConsistentlyDuration)
} else {
- if assertion.ctx == nil {
+ if assertion.ctx == nil || assertion.g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts {
return time.After(assertion.g.DurationBundle.EventuallyTimeout)
} else {
return nil
@@ -354,14 +366,14 @@ func (assertion *AsyncAssertion) afterPolling() <-chan time.Time {
}
}
-func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value interface{}) bool {
+func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value any) bool {
if assertion.actualIsFunc || types.MatchMayChangeInTheFuture(matcher, value) {
return false
}
return true
}
-func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value interface{}) (matches bool, err error) {
+func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value any) (matches bool, err error) {
defer func() {
if e := recover(); e != nil {
if _, isAsyncError := AsPollingSignalError(e); isAsyncError {
@@ -377,13 +389,13 @@ func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value
return
}
-func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
+func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...any) bool {
timer := time.Now()
timeout := assertion.afterTimeout()
lock := sync.Mutex{}
var matches, hasLastValidActual bool
- var actual, lastValidActual interface{}
+ var actual, lastValidActual any
var actualErr, matcherErr error
var oracleMatcherSaysStop bool
@@ -440,7 +452,7 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch
}
} else {
var fgErr formattedGomegaError
- if errors.As(actualErr, &fgErr) {
+ if errors.As(matcherErr, &fgErr) {
message += fgErr.FormattedGomegaError() + "\n"
} else {
message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr)
@@ -496,7 +508,15 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch
for _, err := range []error{actualErr, matcherErr} {
if pollingSignalErr, ok := AsPollingSignalError(err); ok {
if pollingSignalErr.IsStopTrying() {
- fail("Told to stop trying")
+ if pollingSignalErr.IsSuccessful() {
+ if assertion.asyncType == AsyncAssertionTypeEventually {
+ fail("Told to stop trying (and ignoring call to Successfully(), as it is only relevant with Consistently)")
+ } else {
+ return true // early escape hatch for Consistently
+ }
+ } else {
+ fail("Told to stop trying")
+ }
return false
}
if pollingSignalErr.IsTryAgainAfter() {
@@ -553,7 +573,12 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch
lock.Unlock()
}
case <-contextDone:
- fail("Context was cancelled")
+ err := context.Cause(assertion.ctx)
+ if err != nil && err != context.Canceled {
+ fail(fmt.Sprintf("Context was cancelled (cause: %s)", err))
+ } else {
+ fail("Context was cancelled")
+ }
return false
case <-timeout:
if assertion.asyncType == AsyncAssertionTypeEventually {
diff --git a/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/vendor/github.com/onsi/gomega/internal/duration_bundle.go
index 6e0d90d3a1..1019deb88e 100644
--- a/vendor/github.com/onsi/gomega/internal/duration_bundle.go
+++ b/vendor/github.com/onsi/gomega/internal/duration_bundle.go
@@ -8,10 +8,11 @@ import (
)
type DurationBundle struct {
- EventuallyTimeout time.Duration
- EventuallyPollingInterval time.Duration
- ConsistentlyDuration time.Duration
- ConsistentlyPollingInterval time.Duration
+ EventuallyTimeout time.Duration
+ EventuallyPollingInterval time.Duration
+ ConsistentlyDuration time.Duration
+ ConsistentlyPollingInterval time.Duration
+ EnforceDefaultTimeoutsWhenUsingContexts bool
}
const (
@@ -20,15 +21,19 @@ const (
ConsistentlyDurationEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_DURATION"
ConsistentlyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL"
+
+ EnforceDefaultTimeoutsWhenUsingContextsEnvVarName = "GOMEGA_ENFORCE_DEFAULT_TIMEOUTS_WHEN_USING_CONTEXTS"
)
func FetchDefaultDurationBundle() DurationBundle {
+ _, EnforceDefaultTimeoutsWhenUsingContexts := os.LookupEnv(EnforceDefaultTimeoutsWhenUsingContextsEnvVarName)
return DurationBundle{
EventuallyTimeout: durationFromEnv(EventuallyTimeoutEnvVarName, time.Second),
EventuallyPollingInterval: durationFromEnv(EventuallyPollingIntervalEnvVarName, 10*time.Millisecond),
- ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond),
- ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond),
+ ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond),
+ ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond),
+ EnforceDefaultTimeoutsWhenUsingContexts: EnforceDefaultTimeoutsWhenUsingContexts,
}
}
@@ -44,7 +49,7 @@ func durationFromEnv(key string, defaultDuration time.Duration) time.Duration {
return duration
}
-func toDuration(input interface{}) (time.Duration, error) {
+func toDuration(input any) (time.Duration, error) {
duration, ok := input.(time.Duration)
if ok {
return duration, nil
diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go
index de1f4f336e..66dfe7d041 100644
--- a/vendor/github.com/onsi/gomega/internal/gomega.go
+++ b/vendor/github.com/onsi/gomega/internal/gomega.go
@@ -40,45 +40,45 @@ func (g *Gomega) ConfigureWithT(t types.GomegaTestingT) *Gomega {
return g
}
-func (g *Gomega) Ω(actual interface{}, extra ...interface{}) types.Assertion {
+func (g *Gomega) Ω(actual any, extra ...any) types.Assertion {
return g.ExpectWithOffset(0, actual, extra...)
}
-func (g *Gomega) Expect(actual interface{}, extra ...interface{}) types.Assertion {
+func (g *Gomega) Expect(actual any, extra ...any) types.Assertion {
return g.ExpectWithOffset(0, actual, extra...)
}
-func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) types.Assertion {
+func (g *Gomega) ExpectWithOffset(offset int, actual any, extra ...any) types.Assertion {
return NewAssertion(actual, g, offset, extra...)
}
-func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) Eventually(actualOrCtx any, args ...any) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...)
}
-func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx any, args ...any) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...)
}
-func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) Consistently(actualOrCtx any, args ...any) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...)
}
-func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...)
}
-func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx any, args ...any) types.AsyncAssertion {
baseOffset := 3
timeoutInterval := -time.Duration(1)
pollingInterval := -time.Duration(1)
- intervals := []interface{}{}
+ intervals := []any{}
var ctx context.Context
actual := actualOrCtx
startingIndex := 0
if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 {
- // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argumnent **and** the second argument is not a parseable duration
+ // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argument **and** the second argument is not a parseable duration
// this is due to an unfortunate ambiguity in early version of Gomega in which multi-type durations are allowed after the actual
if _, err := toDuration(args[0]); err != nil {
ctx = actualOrCtx.(context.Context)
@@ -127,3 +127,11 @@ func (g *Gomega) SetDefaultConsistentlyDuration(t time.Duration) {
func (g *Gomega) SetDefaultConsistentlyPollingInterval(t time.Duration) {
g.DurationBundle.ConsistentlyPollingInterval = t
}
+
+func (g *Gomega) EnforceDefaultTimeoutsWhenUsingContexts() {
+ g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts = true
+}
+
+func (g *Gomega) DisableDefaultTimeoutsWhenUsingContext() {
+ g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts = false
+}
diff --git a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
index 83b04b1a4c..450c403330 100644
--- a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
+++ b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
@@ -17,6 +17,7 @@ type PollingSignalError interface {
error
Wrap(err error) PollingSignalError
Attach(description string, obj any) PollingSignalError
+ Successfully() PollingSignalError
Now()
}
@@ -45,6 +46,7 @@ type PollingSignalErrorImpl struct {
wrappedErr error
pollingSignalErrorType PollingSignalErrorType
duration time.Duration
+ successful bool
Attachments []PollingSignalErrorAttachment
}
@@ -73,6 +75,11 @@ func (s *PollingSignalErrorImpl) Unwrap() error {
return s.wrappedErr
}
+func (s *PollingSignalErrorImpl) Successfully() PollingSignalError {
+ s.successful = true
+ return s
+}
+
func (s *PollingSignalErrorImpl) Now() {
panic(s)
}
@@ -81,6 +88,10 @@ func (s *PollingSignalErrorImpl) IsStopTrying() bool {
return s.pollingSignalErrorType == PollingSignalErrorTypeStopTrying
}
+func (s *PollingSignalErrorImpl) IsSuccessful() bool {
+ return s.successful
+}
+
func (s *PollingSignalErrorImpl) IsTryAgainAfter() bool {
return s.pollingSignalErrorType == PollingSignalErrorTypeTryAgainAfter
}
@@ -89,7 +100,7 @@ func (s *PollingSignalErrorImpl) TryAgainDuration() time.Duration {
return s.duration
}
-func AsPollingSignalError(actual interface{}) (*PollingSignalErrorImpl, bool) {
+func AsPollingSignalError(actual any) (*PollingSignalErrorImpl, bool) {
if actual == nil {
return nil, false
}
diff --git a/vendor/github.com/onsi/gomega/internal/vetoptdesc.go b/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
index f295876417..b748de41f1 100644
--- a/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
+++ b/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
@@ -10,7 +10,7 @@ import (
// Gomega matcher at the beginning it panics. This allows for rendering Gomega
// matchers as part of an optional Description, as long as they're not in the
// first slot.
-func vetOptionalDescription(assertion string, optionalDescription ...interface{}) {
+func vetOptionalDescription(assertion string, optionalDescription ...any) {
if len(optionalDescription) == 0 {
return
}
diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go
index cd3f431d2f..10b6693fd6 100644
--- a/vendor/github.com/onsi/gomega/matchers.go
+++ b/vendor/github.com/onsi/gomega/matchers.go
@@ -1,6 +1,7 @@
package gomega
import (
+ "fmt"
"time"
"github.com/google/go-cmp/cmp"
@@ -11,7 +12,7 @@ import (
// Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about
// types when performing comparisons.
// It is an error for both actual and expected to be nil. Use BeNil() instead.
-func Equal(expected interface{}) types.GomegaMatcher {
+func Equal(expected any) types.GomegaMatcher {
return &matchers.EqualMatcher{
Expected: expected,
}
@@ -21,7 +22,7 @@ func Equal(expected interface{}) types.GomegaMatcher {
// This is done by converting actual to have the type of expected before
// attempting equality with reflect.DeepEqual.
// It is an error for actual and expected to be nil. Use BeNil() instead.
-func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
+func BeEquivalentTo(expected any) types.GomegaMatcher {
return &matchers.BeEquivalentToMatcher{
Expected: expected,
}
@@ -30,7 +31,7 @@ func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
// BeComparableTo uses gocmp.Equal from github.com/google/go-cmp (instead of reflect.DeepEqual) to perform a deep comparison.
// You can pass cmp.Option as options.
// It is an error for actual and expected to be nil. Use BeNil() instead.
-func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatcher {
+func BeComparableTo(expected any, opts ...cmp.Option) types.GomegaMatcher {
return &matchers.BeComparableToMatcher{
Expected: expected,
Options: opts,
@@ -40,7 +41,7 @@ func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatche
// BeIdenticalTo uses the == operator to compare actual with expected.
// BeIdenticalTo is strict about types when performing comparisons.
// It is an error for both actual and expected to be nil. Use BeNil() instead.
-func BeIdenticalTo(expected interface{}) types.GomegaMatcher {
+func BeIdenticalTo(expected any) types.GomegaMatcher {
return &matchers.BeIdenticalToMatcher{
Expected: expected,
}
@@ -52,15 +53,31 @@ func BeNil() types.GomegaMatcher {
}
// BeTrue succeeds if actual is true
+//
+// In general, it's better to use `BeTrueBecause(reason)` to provide a more useful error message if a true check fails.
func BeTrue() types.GomegaMatcher {
return &matchers.BeTrueMatcher{}
}
// BeFalse succeeds if actual is false
+//
+// In general, it's better to use `BeFalseBecause(reason)` to provide a more useful error message if a false check fails.
func BeFalse() types.GomegaMatcher {
return &matchers.BeFalseMatcher{}
}
+// BeTrueBecause succeeds if actual is true and displays the provided reason if it is false
+// fmt.Sprintf is used to render the reason
+func BeTrueBecause(format string, args ...any) types.GomegaMatcher {
+ return &matchers.BeTrueMatcher{Reason: fmt.Sprintf(format, args...)}
+}
+
+// BeFalseBecause succeeds if actual is false and displays the provided reason if it is true.
+// fmt.Sprintf is used to render the reason
+func BeFalseBecause(format string, args ...any) types.GomegaMatcher {
+ return &matchers.BeFalseMatcher{Reason: fmt.Sprintf(format, args...)}
+}
+
// HaveOccurred succeeds if actual is a non-nil error
// The typical Go error checking pattern looks like:
//
@@ -122,7 +139,7 @@ func Succeed() types.GomegaMatcher {
// Error interface
//
// The optional second argument is a description of the error function, if used. This is required when passing a function but is ignored in all other cases.
-func MatchError(expected interface{}, functionErrorDescription ...any) types.GomegaMatcher {
+func MatchError(expected any, functionErrorDescription ...any) types.GomegaMatcher {
return &matchers.MatchErrorMatcher{
Expected: expected,
FuncErrDescription: functionErrorDescription,
@@ -177,20 +194,21 @@ func BeClosed() types.GomegaMatcher {
//
// will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
//
-// Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
+// Furthermore, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
//
// var myThing thing
// Eventually(thingChan).Should(Receive(&myThing))
// Expect(myThing.Sprocket).Should(Equal("foo"))
// Expect(myThing.IsValid()).Should(BeTrue())
-func Receive(args ...interface{}) types.GomegaMatcher {
- var arg interface{}
- if len(args) > 0 {
- arg = args[0]
- }
-
+//
+// Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received,
+// you can pass a pointer to a variable of the appropriate type first, and second a matcher:
+//
+// var myThing thing
+// Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar")))
+func Receive(args ...any) types.GomegaMatcher {
return &matchers.ReceiveMatcher{
- Arg: arg,
+ Args: args,
}
}
@@ -206,7 +224,7 @@ func Receive(args ...interface{}) types.GomegaMatcher {
//
// Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
// Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
-func BeSent(arg interface{}) types.GomegaMatcher {
+func BeSent(arg any) types.GomegaMatcher {
return &matchers.BeSentMatcher{
Arg: arg,
}
@@ -215,7 +233,7 @@ func BeSent(arg interface{}) types.GomegaMatcher {
// MatchRegexp succeeds if actual is a string or stringer that matches the
// passed-in regexp. Optional arguments can be provided to construct a regexp
// via fmt.Sprintf().
-func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
+func MatchRegexp(regexp string, args ...any) types.GomegaMatcher {
return &matchers.MatchRegexpMatcher{
Regexp: regexp,
Args: args,
@@ -225,7 +243,7 @@ func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
// ContainSubstring succeeds if actual is a string or stringer that contains the
// passed-in substring. Optional arguments can be provided to construct the substring
// via fmt.Sprintf().
-func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
+func ContainSubstring(substr string, args ...any) types.GomegaMatcher {
return &matchers.ContainSubstringMatcher{
Substr: substr,
Args: args,
@@ -235,7 +253,7 @@ func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
// HavePrefix succeeds if actual is a string or stringer that contains the
// passed-in string as a prefix. Optional arguments can be provided to construct
// via fmt.Sprintf().
-func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
+func HavePrefix(prefix string, args ...any) types.GomegaMatcher {
return &matchers.HavePrefixMatcher{
Prefix: prefix,
Args: args,
@@ -245,7 +263,7 @@ func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
// HaveSuffix succeeds if actual is a string or stringer that contains the
// passed-in string as a suffix. Optional arguments can be provided to construct
// via fmt.Sprintf().
-func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
+func HaveSuffix(suffix string, args ...any) types.GomegaMatcher {
return &matchers.HaveSuffixMatcher{
Suffix: suffix,
Args: args,
@@ -255,7 +273,7 @@ func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
// MatchJSON succeeds if actual is a string or stringer of JSON that matches
// the expected JSON. The JSONs are decoded and the resulting objects are compared via
// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
-func MatchJSON(json interface{}) types.GomegaMatcher {
+func MatchJSON(json any) types.GomegaMatcher {
return &matchers.MatchJSONMatcher{
JSONToMatch: json,
}
@@ -264,7 +282,7 @@ func MatchJSON(json interface{}) types.GomegaMatcher {
// MatchXML succeeds if actual is a string or stringer of XML that matches
// the expected XML. The XMLs are decoded and the resulting objects are compared via
// reflect.DeepEqual so things like whitespaces shouldn't matter.
-func MatchXML(xml interface{}) types.GomegaMatcher {
+func MatchXML(xml any) types.GomegaMatcher {
return &matchers.MatchXMLMatcher{
XMLToMatch: xml,
}
@@ -273,7 +291,7 @@ func MatchXML(xml interface{}) types.GomegaMatcher {
// MatchYAML succeeds if actual is a string or stringer of YAML that matches
// the expected YAML. The YAML's are decoded and the resulting objects are compared via
// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
-func MatchYAML(yaml interface{}) types.GomegaMatcher {
+func MatchYAML(yaml any) types.GomegaMatcher {
return &matchers.MatchYAMLMatcher{
YAMLToMatch: yaml,
}
@@ -320,7 +338,7 @@ func BeZero() types.GomegaMatcher {
//
// var findings []string
// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubString("Bar", &findings)))
-func ContainElement(element interface{}, result ...interface{}) types.GomegaMatcher {
+func ContainElement(element any, result ...any) types.GomegaMatcher {
return &matchers.ContainElementMatcher{
Element: element,
Result: result,
@@ -340,7 +358,7 @@ func ContainElement(element interface{}, result ...interface{}) types.GomegaMatc
// Expect(2).Should(BeElementOf(1, 2))
//
// Actual must be typed.
-func BeElementOf(elements ...interface{}) types.GomegaMatcher {
+func BeElementOf(elements ...any) types.GomegaMatcher {
return &matchers.BeElementOfMatcher{
Elements: elements,
}
@@ -350,7 +368,7 @@ func BeElementOf(elements ...interface{}) types.GomegaMatcher {
// BeKeyOf() always uses Equal() to perform the match between actual and the map keys.
//
// Expect("foo").Should(BeKeyOf(map[string]bool{"foo": true, "bar": false}))
-func BeKeyOf(element interface{}) types.GomegaMatcher {
+func BeKeyOf(element any) types.GomegaMatcher {
return &matchers.BeKeyOfMatcher{
Map: element,
}
@@ -370,14 +388,14 @@ func BeKeyOf(element interface{}) types.GomegaMatcher {
//
// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
//
-// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
-func ConsistOf(elements ...interface{}) types.GomegaMatcher {
+// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []any are different types - hence the need for this special rule.
+func ConsistOf(elements ...any) types.GomegaMatcher {
return &matchers.ConsistOfMatcher{
Elements: elements,
}
}
-// HaveExactElemets succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter.
+// HaveExactElements succeeds if actual contains elements that precisely match the elements passed into the matcher. The ordering of the elements does matter.
// By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
//
// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar"))
@@ -385,7 +403,7 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher {
// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements(ContainSubstring("Foo"), ContainSubstring("Foo")))
//
// Actual must be an array or slice.
-func HaveExactElements(elements ...interface{}) types.GomegaMatcher {
+func HaveExactElements(elements ...any) types.GomegaMatcher {
return &matchers.HaveExactElementsMatcher{
Elements: elements,
}
@@ -399,7 +417,7 @@ func HaveExactElements(elements ...interface{}) types.GomegaMatcher {
//
// Actual must be an array, slice or map.
// For maps, ContainElements searches through the map's values.
-func ContainElements(elements ...interface{}) types.GomegaMatcher {
+func ContainElements(elements ...any) types.GomegaMatcher {
return &matchers.ContainElementsMatcher{
Elements: elements,
}
@@ -414,7 +432,7 @@ func ContainElements(elements ...interface{}) types.GomegaMatcher {
//
// Actual must be an array, slice or map.
// For maps, HaveEach searches through the map's values.
-func HaveEach(element interface{}) types.GomegaMatcher {
+func HaveEach(element any) types.GomegaMatcher {
return &matchers.HaveEachMatcher{
Element: element,
}
@@ -425,7 +443,7 @@ func HaveEach(element interface{}) types.GomegaMatcher {
// matcher can be passed in instead:
//
// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
-func HaveKey(key interface{}) types.GomegaMatcher {
+func HaveKey(key any) types.GomegaMatcher {
return &matchers.HaveKeyMatcher{
Key: key,
}
@@ -437,7 +455,7 @@ func HaveKey(key interface{}) types.GomegaMatcher {
//
// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
-func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
+func HaveKeyWithValue(key any, value any) types.GomegaMatcher {
return &matchers.HaveKeyWithValueMatcher{
Key: key,
Value: value,
@@ -465,7 +483,7 @@ func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
// Expect(book).To(HaveField("Title", ContainSubstring("Les"))
// Expect(book).To(HaveField("Author.FirstName", Equal("Victor"))
// Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900))
-func HaveField(field string, expected interface{}) types.GomegaMatcher {
+func HaveField(field string, expected any) types.GomegaMatcher {
return &matchers.HaveFieldMatcher{
Field: field,
Expected: expected,
@@ -517,7 +535,7 @@ func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher {
// Expect(1.0).Should(BeNumerically(">=", 1.0))
// Expect(1.0).Should(BeNumerically("<", 3))
// Expect(1.0).Should(BeNumerically("<=", 1.0))
-func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
+func BeNumerically(comparator string, compareTo ...any) types.GomegaMatcher {
return &matchers.BeNumericallyMatcher{
Comparator: comparator,
CompareTo: compareTo,
@@ -544,7 +562,7 @@ func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Dura
// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type
// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
-func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
+func BeAssignableToTypeOf(expected any) types.GomegaMatcher {
return &matchers.AssignableToTypeOfMatcher{
Expected: expected,
}
@@ -563,7 +581,7 @@ func Panic() types.GomegaMatcher {
// matcher can be passed in instead:
//
// Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`)))
-func PanicWith(expected interface{}) types.GomegaMatcher {
+func PanicWith(expected any) types.GomegaMatcher {
return &matchers.PanicMatcher{Expected: expected}
}
@@ -592,7 +610,7 @@ func BeADirectory() types.GomegaMatcher {
// Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200
// Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found"
// Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204
-func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher {
+func HaveHTTPStatus(expected ...any) types.GomegaMatcher {
return &matchers.HaveHTTPStatusMatcher{Expected: expected}
}
@@ -600,7 +618,7 @@ func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher {
// Actual must be either a *http.Response or *httptest.ResponseRecorder.
// Expected must be a string header name, followed by a header value which
// can be a string, or another matcher.
-func HaveHTTPHeaderWithValue(header string, value interface{}) types.GomegaMatcher {
+func HaveHTTPHeaderWithValue(header string, value any) types.GomegaMatcher {
return &matchers.HaveHTTPHeaderWithValueMatcher{
Header: header,
Value: value,
@@ -610,7 +628,7 @@ func HaveHTTPHeaderWithValue(header string, value interface{}) types.GomegaMatch
// HaveHTTPBody matches if the body matches.
// Actual must be either a *http.Response or *httptest.ResponseRecorder.
// Expected must be either a string, []byte, or other matcher
-func HaveHTTPBody(expected interface{}) types.GomegaMatcher {
+func HaveHTTPBody(expected any) types.GomegaMatcher {
return &matchers.HaveHTTPBodyMatcher{Expected: expected}
}
@@ -669,15 +687,15 @@ func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
// Expect(1).To(WithTransform(failingplus1, Equal(2)))
//
// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
-func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
+func WithTransform(transform any, matcher types.GomegaMatcher) types.GomegaMatcher {
return matchers.NewWithTransformMatcher(transform, matcher)
}
// Satisfy matches the actual value against the `predicate` function.
-// The given predicate must be a function of one paramter that returns bool.
+// The given predicate must be a function of one parameter that returns bool.
//
// var isEven = func(i int) bool { return i%2 == 0 }
// Expect(2).To(Satisfy(isEven))
-func Satisfy(predicate interface{}) types.GomegaMatcher {
+func Satisfy(predicate any) types.GomegaMatcher {
return matchers.NewSatisfyMatcher(predicate)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go
index 6bd826adc5..db48e90b37 100644
--- a/vendor/github.com/onsi/gomega/matchers/and.go
+++ b/vendor/github.com/onsi/gomega/matchers/and.go
@@ -14,7 +14,7 @@ type AndMatcher struct {
firstFailedMatcher types.GomegaMatcher
}
-func (m *AndMatcher) Match(actual interface{}) (success bool, err error) {
+func (m *AndMatcher) Match(actual any) (success bool, err error) {
m.firstFailedMatcher = nil
for _, matcher := range m.Matchers {
success, err := matcher.Match(actual)
@@ -26,16 +26,16 @@ func (m *AndMatcher) Match(actual interface{}) (success bool, err error) {
return true, nil
}
-func (m *AndMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *AndMatcher) FailureMessage(actual any) (message string) {
return m.firstFailedMatcher.FailureMessage(actual)
}
-func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *AndMatcher) NegatedFailureMessage(actual any) (message string) {
// not the most beautiful list of matchers, but not bad either...
return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers))
}
-func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (m *AndMatcher) MatchMayChangeInTheFuture(actual any) bool {
/*
Example with 3 matchers: A, B, C
diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
index be48395201..a100e5c07e 100644
--- a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
@@ -10,10 +10,10 @@ import (
)
type AssignableToTypeOfMatcher struct {
- Expected interface{}
+ Expected any
}
-func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *AssignableToTypeOfMatcher) Match(actual any) (success bool, err error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
} else if matcher.Expected == nil {
@@ -28,10 +28,10 @@ func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success boo
return actualType.AssignableTo(expectedType), nil
}
-func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string {
+func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual any) string {
return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected))
}
-func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string {
+func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual any) string {
return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
index 93d4497c70..1d82360484 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
@@ -24,11 +24,11 @@ func (t notADirectoryError) Error() string {
}
type BeADirectoryMatcher struct {
- expected interface{}
+ expected any
err error
}
-func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeADirectoryMatcher) Match(actual any) (success bool, err error) {
actualFilename, ok := actual.(string)
if !ok {
return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path")
@@ -47,10 +47,10 @@ func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err
return true, nil
}
-func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeADirectoryMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err))
}
-func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not be a directory")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
index 8fefc4deb7..3e53d6285b 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
@@ -24,11 +24,11 @@ func (t notARegularFileError) Error() string {
}
type BeARegularFileMatcher struct {
- expected interface{}
+ expected any
err error
}
-func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeARegularFileMatcher) Match(actual any) (success bool, err error) {
actualFilename, ok := actual.(string)
if !ok {
return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path")
@@ -47,10 +47,10 @@ func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, e
return true, nil
}
-func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeARegularFileMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err))
}
-func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not be a regular file")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
index e2bdd28113..04f156db39 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
@@ -10,10 +10,10 @@ import (
)
type BeAnExistingFileMatcher struct {
- expected interface{}
+ expected any
}
-func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeAnExistingFileMatcher) Match(actual any) (success bool, err error) {
actualFilename, ok := actual.(string)
if !ok {
return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path")
@@ -31,10 +31,10 @@ func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool,
return true, nil
}
-func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeAnExistingFileMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to exist")
}
-func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to exist")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
index f13c24490f..4319dde455 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
@@ -12,7 +12,7 @@ import (
type BeClosedMatcher struct {
}
-func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeClosedMatcher) Match(actual any) (success bool, err error) {
if !isChan(actual) {
return false, fmt.Errorf("BeClosed matcher expects a channel. Got:\n%s", format.Object(actual, 1))
}
@@ -39,10 +39,10 @@ func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err err
return closed, nil
}
-func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeClosedMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be closed")
}
-func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeClosedMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "to be open")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
index 8ab4bb9194..ce74eee4c7 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
@@ -2,6 +2,7 @@ package matchers
import (
"bytes"
+ "errors"
"fmt"
"github.com/google/go-cmp/cmp"
@@ -9,11 +10,11 @@ import (
)
type BeComparableToMatcher struct {
- Expected interface{}
+ Expected any
Options cmp.Options
}
-func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, matchErr error) {
+func (matcher *BeComparableToMatcher) Match(actual any) (success bool, matchErr error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
}
@@ -32,7 +33,7 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m
if err, ok := r.(error); ok {
matchErr = err
} else if errMsg, ok := r.(string); ok {
- matchErr = fmt.Errorf(errMsg)
+ matchErr = errors.New(errMsg)
}
}
}()
@@ -40,10 +41,10 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m
return cmp.Equal(actual, matcher.Expected, matcher.Options...), nil
}
-func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) {
- return cmp.Diff(matcher.Expected, actual, matcher.Options)
+func (matcher *BeComparableToMatcher) FailureMessage(actual any) (message string) {
+ return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...))
}
-func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "not to equal", matcher.Expected)
+func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual any) (message string) {
+ return format.Message(actual, "not to be comparable to", matcher.Expected)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
index 9ee75a5d51..406fe54843 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
@@ -10,10 +10,10 @@ import (
)
type BeElementOfMatcher struct {
- Elements []interface{}
+ Elements []any
}
-func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeElementOfMatcher) Match(actual any) (success bool, err error) {
if reflect.TypeOf(actual) == nil {
return false, fmt.Errorf("BeElement matcher expects actual to be typed")
}
@@ -34,10 +34,10 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err
return false, lastError
}
-func (matcher *BeElementOfMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeElementOfMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be an element of", presentable(matcher.Elements))
}
-func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be an element of", presentable(matcher.Elements))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
index 527c1a1c10..e9e0644f32 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
@@ -4,26 +4,40 @@ package matchers
import (
"fmt"
+ "reflect"
"github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/matchers/internal/miter"
)
type BeEmptyMatcher struct {
}
-func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeEmptyMatcher) Match(actual any) (success bool, err error) {
+ // short-circuit the iterator case, as we only need to see the first
+ // element, if any.
+ if miter.IsIter(actual) {
+ var length int
+ if miter.IsSeq2(actual) {
+ miter.IterateKV(actual, func(k, v reflect.Value) bool { length++; return false })
+ } else {
+ miter.IterateV(actual, func(v reflect.Value) bool { length++; return false })
+ }
+ return length == 0, nil
+ }
+
length, ok := lengthOf(actual)
if !ok {
- return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1))
+ return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice/iterator. Got:\n%s", format.Object(actual, 1))
}
return length == 0, nil
}
-func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeEmptyMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be empty")
}
-func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be empty")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
index 263627f408..37b3080ba7 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
@@ -10,10 +10,10 @@ import (
)
type BeEquivalentToMatcher struct {
- Expected interface{}
+ Expected any
}
-func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeEquivalentToMatcher) Match(actual any) (success bool, err error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Both actual and expected must not be nil.")
}
@@ -27,10 +27,10 @@ func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, e
return reflect.DeepEqual(convertedActual, matcher.Expected), nil
}
-func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeEquivalentToMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be equivalent to", matcher.Expected)
}
-func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be equivalent to", matcher.Expected)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
index e326c01577..55e869515a 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
@@ -9,9 +9,10 @@ import (
)
type BeFalseMatcher struct {
+ Reason string
}
-func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeFalseMatcher) Match(actual any) (success bool, err error) {
if !isBool(actual) {
return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1))
}
@@ -19,10 +20,18 @@ func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err erro
return actual == false, nil
}
-func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "to be false")
+func (matcher *BeFalseMatcher) FailureMessage(actual any) (message string) {
+ if matcher.Reason == "" {
+ return format.Message(actual, "to be false")
+ } else {
+ return matcher.Reason
+ }
}
-func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "not to be false")
+func (matcher *BeFalseMatcher) NegatedFailureMessage(actual any) (message string) {
+ if matcher.Reason == "" {
+ return format.Message(actual, "not to be false")
+ } else {
+ return fmt.Sprintf(`Expected not false but got false\nNegation of "%s" failed`, matcher.Reason)
+ }
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go
index 631ce11e33..579aa41b31 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go
@@ -10,10 +10,10 @@ import (
)
type BeIdenticalToMatcher struct {
- Expected interface{}
+ Expected any
}
-func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, matchErr error) {
+func (matcher *BeIdenticalToMatcher) Match(actual any) (success bool, matchErr error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
}
@@ -30,10 +30,10 @@ func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, ma
return actual == matcher.Expected, nil
}
-func (matcher *BeIdenticalToMatcher) FailureMessage(actual interface{}) string {
+func (matcher *BeIdenticalToMatcher) FailureMessage(actual any) string {
return format.Message(actual, "to be identical to", matcher.Expected)
}
-func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual interface{}) string {
+func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual any) string {
return format.Message(actual, "not to be identical to", matcher.Expected)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
index 449a291ef9..3fff3df784 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
@@ -8,10 +8,10 @@ import (
)
type BeKeyOfMatcher struct {
- Map interface{}
+ Map any
}
-func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeKeyOfMatcher) Match(actual any) (success bool, err error) {
if !isMap(matcher.Map) {
return false, fmt.Errorf("BeKeyOf matcher needs expected to be a map type")
}
@@ -36,10 +36,10 @@ func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err erro
return false, lastError
}
-func (matcher *BeKeyOfMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeKeyOfMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be a key of", presentable(valuesOf(matcher.Map)))
}
-func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be a key of", presentable(valuesOf(matcher.Map)))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
index 551d99d747..cab37f4f95 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
@@ -7,14 +7,14 @@ import "github.com/onsi/gomega/format"
type BeNilMatcher struct {
}
-func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeNilMatcher) Match(actual any) (success bool, err error) {
return isNil(actual), nil
}
-func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeNilMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be nil")
}
-func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeNilMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be nil")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
index 100735de32..7e6ce154e1 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
@@ -11,18 +11,18 @@ import (
type BeNumericallyMatcher struct {
Comparator string
- CompareTo []interface{}
+ CompareTo []any
}
-func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeNumericallyMatcher) FailureMessage(actual any) (message string) {
return matcher.FormatFailureMessage(actual, false)
}
-func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual any) (message string) {
return matcher.FormatFailureMessage(actual, true)
}
-func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual interface{}, negated bool) (message string) {
+func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual any, negated bool) (message string) {
if len(matcher.CompareTo) == 1 {
message = fmt.Sprintf("to be %s", matcher.Comparator)
} else {
@@ -34,7 +34,7 @@ func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual interface{}, ne
return format.Message(actual, message, matcher.CompareTo[0])
}
-func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeNumericallyMatcher) Match(actual any) (success bool, err error) {
if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 {
return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
index cf582a3fcb..14ffbf6c4c 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
@@ -10,11 +10,11 @@ import (
)
type BeSentMatcher struct {
- Arg interface{}
+ Arg any
channelClosed bool
}
-func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeSentMatcher) Match(actual any) (success bool, err error) {
if !isChan(actual) {
return false, fmt.Errorf("BeSent expects a channel. Got:\n%s", format.Object(actual, 1))
}
@@ -56,15 +56,15 @@ func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error
return didSend, nil
}
-func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeSentMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to send:", matcher.Arg)
}
-func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeSentMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to send:", matcher.Arg)
}
-func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual any) bool {
if !isChan(actual) {
return false
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
index dec4db024e..edb647c6f2 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
@@ -15,17 +15,17 @@ type BeTemporallyMatcher struct {
Threshold []time.Duration
}
-func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeTemporallyMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo)
}
-func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo)
}
-func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) {
+func (matcher *BeTemporallyMatcher) Match(actual any) (bool, error) {
// predicate to test for time.Time type
- isTime := func(t interface{}) bool {
+ isTime := func(t any) bool {
_, ok := t.(time.Time)
return ok
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
index 60bc1e3fa7..a010bec5ad 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
@@ -9,9 +9,10 @@ import (
)
type BeTrueMatcher struct {
+ Reason string
}
-func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeTrueMatcher) Match(actual any) (success bool, err error) {
if !isBool(actual) {
return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1))
}
@@ -19,10 +20,18 @@ func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error
return actual.(bool), nil
}
-func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "to be true")
+func (matcher *BeTrueMatcher) FailureMessage(actual any) (message string) {
+ if matcher.Reason == "" {
+ return format.Message(actual, "to be true")
+ } else {
+ return matcher.Reason
+ }
}
-func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "not to be true")
+func (matcher *BeTrueMatcher) NegatedFailureMessage(actual any) (message string) {
+ if matcher.Reason == "" {
+ return format.Message(actual, "not to be true")
+ } else {
+ return fmt.Sprintf(`Expected not true but got true\nNegation of "%s" failed`, matcher.Reason)
+ }
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
index 26196f168f..f5f5d7f7d7 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
@@ -9,7 +9,7 @@ import (
type BeZeroMatcher struct {
}
-func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeZeroMatcher) Match(actual any) (success bool, err error) {
if actual == nil {
return true, nil
}
@@ -19,10 +19,10 @@ func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error
}
-func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeZeroMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be zero-valued")
}
-func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeZeroMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be zero-valued")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go
index f69037a4f0..05c751b664 100644
--- a/vendor/github.com/onsi/gomega/matchers/consist_of.go
+++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go
@@ -7,18 +7,19 @@ import (
"reflect"
"github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/matchers/internal/miter"
"github.com/onsi/gomega/matchers/support/goraph/bipartitegraph"
)
type ConsistOfMatcher struct {
- Elements []interface{}
- missingElements []interface{}
- extraElements []interface{}
+ Elements []any
+ missingElements []any
+ extraElements []any
}
-func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) {
- if !isArrayOrSlice(actual) && !isMap(actual) {
- return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
+func (matcher *ConsistOfMatcher) Match(actual any) (success bool, err error) {
+ if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
+ return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1))
}
matchers := matchers(matcher.Elements)
@@ -34,19 +35,19 @@ func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err er
return true, nil
}
- var missingMatchers []interface{}
+ var missingMatchers []any
matcher.extraElements, missingMatchers = bipartiteGraph.FreeLeftRight(edges)
matcher.missingElements = equalMatchersToElements(missingMatchers)
return false, nil
}
-func neighbours(value, matcher interface{}) (bool, error) {
+func neighbours(value, matcher any) (bool, error) {
match, err := matcher.(omegaMatcher).Match(value)
return match && err == nil, nil
}
-func equalMatchersToElements(matchers []interface{}) (elements []interface{}) {
+func equalMatchersToElements(matchers []any) (elements []any) {
for _, matcher := range matchers {
if equalMatcher, ok := matcher.(*EqualMatcher); ok {
elements = append(elements, equalMatcher.Expected)
@@ -59,20 +60,31 @@ func equalMatchersToElements(matchers []interface{}) (elements []interface{}) {
return
}
-func flatten(elems []interface{}) []interface{} {
- if len(elems) != 1 || !isArrayOrSlice(elems[0]) {
+func flatten(elems []any) []any {
+ if len(elems) != 1 ||
+ !(isArrayOrSlice(elems[0]) ||
+ (miter.IsIter(elems[0]) && !miter.IsSeq2(elems[0]))) {
return elems
}
+ if miter.IsIter(elems[0]) {
+ flattened := []any{}
+ miter.IterateV(elems[0], func(v reflect.Value) bool {
+ flattened = append(flattened, v.Interface())
+ return true
+ })
+ return flattened
+ }
+
value := reflect.ValueOf(elems[0])
- flattened := make([]interface{}, value.Len())
+ flattened := make([]any, value.Len())
for i := 0; i < value.Len(); i++ {
flattened[i] = value.Index(i).Interface()
}
return flattened
}
-func matchers(expectedElems []interface{}) (matchers []interface{}) {
+func matchers(expectedElems []any) (matchers []any) {
for _, e := range flatten(expectedElems) {
if e == nil {
matchers = append(matchers, &BeNilMatcher{})
@@ -85,11 +97,11 @@ func matchers(expectedElems []interface{}) (matchers []interface{}) {
return
}
-func presentable(elems []interface{}) interface{} {
+func presentable(elems []any) any {
elems = flatten(elems)
if len(elems) == 0 {
- return []interface{}{}
+ return []any{}
}
sv := reflect.ValueOf(elems)
@@ -113,10 +125,22 @@ func presentable(elems []interface{}) interface{} {
return ss.Interface()
}
-func valuesOf(actual interface{}) []interface{} {
+func valuesOf(actual any) []any {
value := reflect.ValueOf(actual)
- values := []interface{}{}
- if isMap(actual) {
+ values := []any{}
+ if miter.IsIter(actual) {
+ if miter.IsSeq2(actual) {
+ miter.IterateKV(actual, func(k, v reflect.Value) bool {
+ values = append(values, v.Interface())
+ return true
+ })
+ } else {
+ miter.IterateV(actual, func(v reflect.Value) bool {
+ values = append(values, v.Interface())
+ return true
+ })
+ }
+ } else if isMap(actual) {
keys := value.MapKeys()
for i := 0; i < value.Len(); i++ {
values = append(values, value.MapIndex(keys[i]).Interface())
@@ -130,7 +154,7 @@ func valuesOf(actual interface{}) []interface{} {
return values
}
-func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *ConsistOfMatcher) FailureMessage(actual any) (message string) {
message = format.Message(actual, "to consist of", presentable(matcher.Elements))
message = appendMissingElements(message, matcher.missingElements)
if len(matcher.extraElements) > 0 {
@@ -140,7 +164,7 @@ func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message str
return
}
-func appendMissingElements(message string, missingElements []interface{}) string {
+func appendMissingElements(message string, missingElements []any) string {
if len(missingElements) == 0 {
return message
}
@@ -148,6 +172,6 @@ func appendMissingElements(message string, missingElements []interface{}) string
format.Object(presentable(missingElements), 1))
}
-func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to consist of", presentable(matcher.Elements))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
index 3d45c9ebc6..8337a5261c 100644
--- a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
@@ -8,24 +8,27 @@ import (
"reflect"
"github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/matchers/internal/miter"
)
type ContainElementMatcher struct {
- Element interface{}
- Result []interface{}
+ Element any
+ Result []any
}
-func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
- if !isArrayOrSlice(actual) && !isMap(actual) {
- return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
+func (matcher *ContainElementMatcher) Match(actual any) (success bool, err error) {
+ if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
+ return false, fmt.Errorf("ContainElement matcher expects an array/slice/map/iterator. Got:\n%s", format.Object(actual, 1))
}
var actualT reflect.Type
var result reflect.Value
- switch l := len(matcher.Result); {
- case l > 1:
+ switch numResultArgs := len(matcher.Result); {
+ case numResultArgs > 1:
return false, errors.New("ContainElement matcher expects at most a single optional pointer to store its findings at")
- case l == 1:
+ case numResultArgs == 1:
+ // Check the optional result arg to point to a single value/array/slice/map
+ // of a type compatible with the actual value.
if reflect.ValueOf(matcher.Result[0]).Kind() != reflect.Ptr {
return false, fmt.Errorf("ContainElement matcher expects a non-nil pointer to store its findings at. Got\n%s",
format.Object(matcher.Result[0], 1))
@@ -34,93 +37,209 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
resultReference := matcher.Result[0]
result = reflect.ValueOf(resultReference).Elem() // what ResultReference points to, to stash away our findings
switch result.Kind() {
- case reflect.Array:
+ case reflect.Array: // result arrays are not supported, as they cannot be dynamically sized.
+ if miter.IsIter(actual) {
+ _, actualvT := miter.IterKVTypes(actual)
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ reflect.SliceOf(actualvT), result.Type().String())
+ }
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
reflect.SliceOf(actualT.Elem()).String(), result.Type().String())
- case reflect.Slice:
- if !isArrayOrSlice(actual) {
+
+ case reflect.Slice: // result slice
+ // can we assign elements in actual to elements in what the result
+ // arg points to?
+ // - ✔ actual is an array or slice
+ // - ✔ actual is an iter.Seq producing "v" elements
+ // - ✔ actual is an iter.Seq2 producing "v" elements, ignoring
+ // the "k" elements.
+ switch {
+ case isArrayOrSlice(actual):
+ if !actualT.Elem().AssignableTo(result.Type().Elem()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.String(), result.Type().String())
+ }
+
+ case miter.IsIter(actual):
+ _, actualvT := miter.IterKVTypes(actual)
+ if !actualvT.AssignableTo(result.Type().Elem()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualvT.String(), result.Type().String())
+ }
+
+ default: // incompatible result reference
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
reflect.MapOf(actualT.Key(), actualT.Elem()).String(), result.Type().String())
}
- if !actualT.Elem().AssignableTo(result.Type().Elem()) {
- return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
- actualT.String(), result.Type().String())
- }
- case reflect.Map:
- if !isMap(actual) {
- return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
- actualT.String(), result.Type().String())
- }
- if !actualT.AssignableTo(result.Type()) {
+
+ case reflect.Map: // result map
+ // can we assign elements in actual to elements in what the result
+ // arg points to?
+ // - ✔ actual is a map
+ // - ✔ actual is an iter.Seq2 (iter.Seq doesn't fit though)
+ switch {
+ case isMap(actual):
+ if !actualT.AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.String(), result.Type().String())
+ }
+
+ case miter.IsIter(actual):
+ actualkT, actualvT := miter.IterKVTypes(actual)
+ if actualkT == nil {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ reflect.SliceOf(actualvT).String(), result.Type().String())
+ }
+ if !reflect.MapOf(actualkT, actualvT).AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ reflect.MapOf(actualkT, actualvT), result.Type().String())
+ }
+
+ default: // incompatible result reference
return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
actualT.String(), result.Type().String())
}
+
default:
- if !actualT.Elem().AssignableTo(result.Type()) {
- return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
- actualT.Elem().String(), result.Type().String())
+ // can we assign a (single) element in actual to what the result arg
+ // points to?
+ switch {
+ case miter.IsIter(actual):
+ _, actualvT := miter.IterKVTypes(actual)
+ if !actualvT.AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualvT.String(), result.Type().String())
+ }
+ default:
+ if !actualT.Elem().AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.Elem().String(), result.Type().String())
+ }
}
}
}
+ // If the supplied matcher isn't an Omega matcher, default to the Equal
+ // matcher.
elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
if !elementIsMatcher {
elemMatcher = &EqualMatcher{Expected: matcher.Element}
}
value := reflect.ValueOf(actual)
- var valueAt func(int) interface{}
- var getFindings func() reflect.Value
- var foundAt func(int)
+ var getFindings func() reflect.Value // abstracts how the findings are collected and stored
+ var lastError error
- if isMap(actual) {
- keys := value.MapKeys()
- valueAt = func(i int) interface{} {
- return value.MapIndex(keys[i]).Interface()
+ if !miter.IsIter(actual) {
+ var valueAt func(int) any
+ var foundAt func(int)
+ // We're dealing with an array/slice/map, so in all cases we can iterate
+ // over the elements in actual using indices (that can be considered
+ // keys in case of maps).
+ if isMap(actual) {
+ keys := value.MapKeys()
+ valueAt = func(i int) any {
+ return value.MapIndex(keys[i]).Interface()
+ }
+ if result.Kind() != reflect.Invalid {
+ fm := reflect.MakeMap(actualT)
+ getFindings = func() reflect.Value { return fm }
+ foundAt = func(i int) {
+ fm.SetMapIndex(keys[i], value.MapIndex(keys[i]))
+ }
+ }
+ } else {
+ valueAt = func(i int) any {
+ return value.Index(i).Interface()
+ }
+ if result.Kind() != reflect.Invalid {
+ var fsl reflect.Value
+ if result.Kind() == reflect.Slice {
+ fsl = reflect.MakeSlice(result.Type(), 0, 0)
+ } else {
+ fsl = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0)
+ }
+ getFindings = func() reflect.Value { return fsl }
+ foundAt = func(i int) {
+ fsl = reflect.Append(fsl, value.Index(i))
+ }
+ }
}
- if result.Kind() != reflect.Invalid {
- fm := reflect.MakeMap(actualT)
- getFindings = func() reflect.Value {
- return fm
+
+ for i := 0; i < value.Len(); i++ {
+ elem := valueAt(i)
+ success, err := elemMatcher.Match(elem)
+ if err != nil {
+ lastError = err
+ continue
}
- foundAt = func(i int) {
- fm.SetMapIndex(keys[i], value.MapIndex(keys[i]))
+ if success {
+ if result.Kind() == reflect.Invalid {
+ return true, nil
+ }
+ foundAt(i)
}
}
} else {
- valueAt = func(i int) interface{} {
- return value.Index(i).Interface()
- }
+ // We're dealing with an iterator as a first-class construct, so things
+ // are slightly different: there is no index defined as in case of
+ // arrays/slices/maps, just "ooooorder"
+ var found func(k, v reflect.Value)
if result.Kind() != reflect.Invalid {
- var f reflect.Value
- if result.Kind() == reflect.Slice {
- f = reflect.MakeSlice(result.Type(), 0, 0)
+ if result.Kind() == reflect.Map {
+ fm := reflect.MakeMap(result.Type())
+ getFindings = func() reflect.Value { return fm }
+ found = func(k, v reflect.Value) { fm.SetMapIndex(k, v) }
} else {
- f = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0)
- }
- getFindings = func() reflect.Value {
- return f
- }
- foundAt = func(i int) {
- f = reflect.Append(f, value.Index(i))
+ var fsl reflect.Value
+ if result.Kind() == reflect.Slice {
+ fsl = reflect.MakeSlice(result.Type(), 0, 0)
+ } else {
+ fsl = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0)
+ }
+ getFindings = func() reflect.Value { return fsl }
+ found = func(_, v reflect.Value) { fsl = reflect.Append(fsl, v) }
}
}
- }
- var lastError error
- for i := 0; i < value.Len(); i++ {
- elem := valueAt(i)
- success, err := elemMatcher.Match(elem)
- if err != nil {
- lastError = err
- continue
+ success := false
+ actualkT, _ := miter.IterKVTypes(actual)
+ if actualkT == nil {
+ miter.IterateV(actual, func(v reflect.Value) bool {
+ var err error
+ success, err = elemMatcher.Match(v.Interface())
+ if err != nil {
+ lastError = err
+ return true // iterate on...
+ }
+ if success {
+ if result.Kind() == reflect.Invalid {
+ return false // a match and no result needed, so we're done
+ }
+ found(reflect.Value{}, v)
+ }
+ return true // iterate on...
+ })
+ } else {
+ miter.IterateKV(actual, func(k, v reflect.Value) bool {
+ var err error
+ success, err = elemMatcher.Match(v.Interface())
+ if err != nil {
+ lastError = err
+ return true // iterate on...
+ }
+ if success {
+ if result.Kind() == reflect.Invalid {
+ return false // a match and no result needed, so we're done
+ }
+ found(k, v)
+ }
+ return true // iterate on...
+ })
}
- if success {
- if result.Kind() == reflect.Invalid {
- return true, nil
- }
- foundAt(i)
+ if success && result.Kind() == reflect.Invalid {
+ return true, nil
}
}
@@ -132,7 +251,7 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
}
// pick up any findings the test is interested in as it specified a non-nil
- // result reference. However, the expection always is that there are at
+ // result reference. However, the expectation always is that there are at
// least one or multiple findings. So, if a result is expected, but we had
// no findings, then this is an error.
findings := getFindings()
@@ -165,10 +284,10 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
return true, nil
}
-func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *ContainElementMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to contain element matching", matcher.Element)
}
-func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *ContainElementMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain element matching", matcher.Element)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go
index 946cd8bea5..ce3041892b 100644
--- a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go
@@ -4,17 +4,18 @@ import (
"fmt"
"github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/matchers/internal/miter"
"github.com/onsi/gomega/matchers/support/goraph/bipartitegraph"
)
type ContainElementsMatcher struct {
- Elements []interface{}
- missingElements []interface{}
+ Elements []any
+ missingElements []any
}
-func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, err error) {
- if !isArrayOrSlice(actual) && !isMap(actual) {
- return false, fmt.Errorf("ContainElements matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
+func (matcher *ContainElementsMatcher) Match(actual any) (success bool, err error) {
+ if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
+ return false, fmt.Errorf("ContainElements matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1))
}
matchers := matchers(matcher.Elements)
@@ -34,11 +35,11 @@ func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool,
return false, nil
}
-func (matcher *ContainElementsMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *ContainElementsMatcher) FailureMessage(actual any) (message string) {
message = format.Message(actual, "to contain elements", presentable(matcher.Elements))
return appendMissingElements(message, matcher.missingElements)
}
-func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain elements", presentable(matcher.Elements))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
index e725f8c275..d9980ee26b 100644
--- a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
@@ -11,10 +11,10 @@ import (
type ContainSubstringMatcher struct {
Substr string
- Args []interface{}
+ Args []any
}
-func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *ContainSubstringMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
@@ -31,10 +31,10 @@ func (matcher *ContainSubstringMatcher) stringToMatch() string {
return stringToMatch
}
-func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *ContainSubstringMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to contain substring", matcher.stringToMatch())
}
-func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain substring", matcher.stringToMatch())
}
diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go
index befb7bdfd8..4ad166157a 100644
--- a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go
@@ -9,10 +9,10 @@ import (
)
type EqualMatcher struct {
- Expected interface{}
+ Expected any
}
-func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *EqualMatcher) Match(actual any) (success bool, err error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
}
@@ -27,7 +27,7 @@ func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error)
return reflect.DeepEqual(actual, matcher.Expected), nil
}
-func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *EqualMatcher) FailureMessage(actual any) (message string) {
actualString, actualOK := actual.(string)
expectedString, expectedOK := matcher.Expected.(string)
if actualOK && expectedOK {
@@ -37,6 +37,6 @@ func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string)
return format.Message(actual, "to equal", matcher.Expected)
}
-func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *EqualMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to equal", matcher.Expected)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
index 9856752f13..a4fcfc425a 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
@@ -12,7 +12,7 @@ type HaveCapMatcher struct {
Count int
}
-func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveCapMatcher) Match(actual any) (success bool, err error) {
length, ok := capOf(actual)
if !ok {
return false, fmt.Errorf("HaveCap matcher expects a array/channel/slice. Got:\n%s", format.Object(actual, 1))
@@ -21,10 +21,10 @@ func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err erro
return length == matcher.Count, nil
}
-func (matcher *HaveCapMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveCapMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nto have capacity %d", format.Object(actual, 1), matcher.Count)
}
-func (matcher *HaveCapMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveCapMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nnot to have capacity %d", format.Object(actual, 1), matcher.Count)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
index 025b6e1ac2..4c45063bd8 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
@@ -5,15 +5,16 @@ import (
"reflect"
"github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/matchers/internal/miter"
)
type HaveEachMatcher struct {
- Element interface{}
+ Element any
}
-func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) {
- if !isArrayOrSlice(actual) && !isMap(actual) {
- return false, fmt.Errorf("HaveEach matcher expects an array/slice/map. Got:\n%s",
+func (matcher *HaveEachMatcher) Match(actual any) (success bool, err error) {
+ if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
+ return false, fmt.Errorf("HaveEach matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s",
format.Object(actual, 1))
}
@@ -22,25 +23,58 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err
elemMatcher = &EqualMatcher{Expected: matcher.Element}
}
+ if miter.IsIter(actual) {
+ // rejecting the non-elements case works different for iterators as we
+ // don't want to fetch all elements into a slice first.
+ count := 0
+ var success bool
+ var err error
+ if miter.IsSeq2(actual) {
+ miter.IterateKV(actual, func(k, v reflect.Value) bool {
+ count++
+ success, err = elemMatcher.Match(v.Interface())
+ if err != nil {
+ return false
+ }
+ return success
+ })
+ } else {
+ miter.IterateV(actual, func(v reflect.Value) bool {
+ count++
+ success, err = elemMatcher.Match(v.Interface())
+ if err != nil {
+ return false
+ }
+ return success
+ })
+ }
+ if count == 0 {
+ return false, fmt.Errorf("HaveEach matcher expects a non-empty iter.Seq/iter.Seq2. Got:\n%s",
+ format.Object(actual, 1))
+ }
+ return success, err
+ }
+
value := reflect.ValueOf(actual)
if value.Len() == 0 {
return false, fmt.Errorf("HaveEach matcher expects a non-empty array/slice/map. Got:\n%s",
format.Object(actual, 1))
}
- var valueAt func(int) interface{}
+ var valueAt func(int) any
if isMap(actual) {
keys := value.MapKeys()
- valueAt = func(i int) interface{} {
+ valueAt = func(i int) any {
return value.MapIndex(keys[i]).Interface()
}
} else {
- valueAt = func(i int) interface{} {
+ valueAt = func(i int) any {
return value.Index(i).Interface()
}
}
- // if there are no elements, then HaveEach will match.
+ // if we never failed then we succeed; the empty/nil cases have already been
+ // rejected above.
for i := 0; i < value.Len(); i++ {
success, err := elemMatcher.Match(valueAt(i))
if err != nil {
@@ -55,11 +89,11 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err
}
// FailureMessage returns a suitable failure message.
-func (matcher *HaveEachMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveEachMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to contain element matching", matcher.Element)
}
// NegatedFailureMessage returns a suitable negated failure message.
-func (matcher *HaveEachMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveEachMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain element matching", matcher.Element)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
index dca5b94467..8b2d297c57 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
@@ -2,8 +2,10 @@ package matchers
import (
"fmt"
+ "reflect"
"github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/matchers/internal/miter"
)
type mismatchFailure struct {
@@ -12,33 +14,77 @@ type mismatchFailure struct {
}
type HaveExactElementsMatcher struct {
- Elements []interface{}
+ Elements []any
mismatchFailures []mismatchFailure
missingIndex int
extraIndex int
}
-func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveExactElementsMatcher) Match(actual any) (success bool, err error) {
matcher.resetState()
- if isMap(actual) {
- return false, fmt.Errorf("error")
+ if isMap(actual) || miter.IsSeq2(actual) {
+ return false, fmt.Errorf("HaveExactElements matcher doesn't work on map or iter.Seq2. Got:\n%s", format.Object(actual, 1))
}
matchers := matchers(matcher.Elements)
- values := valuesOf(actual)
-
lenMatchers := len(matchers)
+
+ success = true
+
+ if miter.IsIter(actual) {
+ // In the worst case, we need to see everything before we can give our
+ // verdict. The only exception is fast fail.
+ i := 0
+ miter.IterateV(actual, func(v reflect.Value) bool {
+ if i >= lenMatchers {
+ // the iterator produces more values than we got matchers: this
+ // is not good.
+ matcher.extraIndex = i
+ success = false
+ return false
+ }
+
+ elemMatcher := matchers[i].(omegaMatcher)
+ match, err := elemMatcher.Match(v.Interface())
+ if err != nil {
+ matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
+ index: i,
+ failure: err.Error(),
+ })
+ success = false
+ } else if !match {
+ matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
+ index: i,
+ failure: elemMatcher.FailureMessage(v.Interface()),
+ })
+ success = false
+ }
+ i++
+ return true
+ })
+ if i < len(matchers) {
+ // the iterator produced less values than we got matchers: this is
+ // no good, no no no.
+ matcher.missingIndex = i
+ success = false
+ }
+ return success, nil
+ }
+
+ values := valuesOf(actual)
lenValues := len(values)
for i := 0; i < lenMatchers || i < lenValues; i++ {
if i >= lenMatchers {
matcher.extraIndex = i
+ success = false
continue
}
if i >= lenValues {
matcher.missingIndex = i
+ success = false
return
}
@@ -49,18 +95,20 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool
index: i,
failure: err.Error(),
})
+ success = false
} else if !match {
matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
index: i,
failure: elemMatcher.FailureMessage(values[i]),
})
+ success = false
}
}
- return matcher.missingIndex+matcher.extraIndex+len(matcher.mismatchFailures) == 0, nil
+ return success, nil
}
-func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExactElementsMatcher) FailureMessage(actual any) (message string) {
message = format.Message(actual, "to have exact elements with", presentable(matcher.Elements))
if matcher.missingIndex > 0 {
message = fmt.Sprintf("%s\nthe missing elements start from index %d", message, matcher.missingIndex)
@@ -77,7 +125,7 @@ func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (mes
return
}
-func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain elements", presentable(matcher.Elements))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
index b57018745f..a5a028e9a6 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
@@ -11,7 +11,7 @@ type HaveExistingFieldMatcher struct {
Field string
}
-func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveExistingFieldMatcher) Match(actual any) (success bool, err error) {
// we don't care about the field's actual value, just about any error in
// trying to find the field (or method).
_, err = extractField(actual, matcher.Field, "HaveExistingField")
@@ -27,10 +27,10 @@ func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool
return false, err
}
-func (matcher *HaveExistingFieldMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExistingFieldMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nto have field '%s'", format.Object(actual, 1), matcher.Field)
}
-func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nnot to have field '%s'", format.Object(actual, 1), matcher.Field)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go
index 6989f78c4b..d9fbeaf752 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_field.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_field.go
@@ -17,7 +17,7 @@ func (e missingFieldError) Error() string {
return string(e)
}
-func extractField(actual interface{}, field string, matchername string) (interface{}, error) {
+func extractField(actual any, field string, matchername string) (any, error) {
fields := strings.SplitN(field, ".", 2)
actualValue := reflect.ValueOf(actual)
@@ -40,7 +40,12 @@ func extractField(actual interface{}, field string, matchername string) (interfa
extractedValue = actualValue.Addr().MethodByName(strings.TrimSuffix(fields[0], "()"))
}
if extractedValue == (reflect.Value{}) {
- return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual))
+ ptr := reflect.New(actualValue.Type())
+ ptr.Elem().Set(actualValue)
+ extractedValue = ptr.MethodByName(strings.TrimSuffix(fields[0], "()"))
+ if extractedValue == (reflect.Value{}) {
+ return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual))
+ }
}
t := extractedValue.Type()
if t.NumIn() != 0 || t.NumOut() != 1 {
@@ -63,37 +68,47 @@ func extractField(actual interface{}, field string, matchername string) (interfa
type HaveFieldMatcher struct {
Field string
- Expected interface{}
+ Expected any
+}
- extractedField interface{}
- expectedMatcher omegaMatcher
+func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher {
+ var isMatcher bool
+ expectedMatcher, isMatcher := matcher.Expected.(omegaMatcher)
+ if !isMatcher {
+ expectedMatcher = &EqualMatcher{Expected: matcher.Expected}
+ }
+ return expectedMatcher
}
-func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) {
- matcher.extractedField, err = extractField(actual, matcher.Field, "HaveField")
+func (matcher *HaveFieldMatcher) Match(actual any) (success bool, err error) {
+ extractedField, err := extractField(actual, matcher.Field, "HaveField")
if err != nil {
return false, err
}
- var isMatcher bool
- matcher.expectedMatcher, isMatcher = matcher.Expected.(omegaMatcher)
- if !isMatcher {
- matcher.expectedMatcher = &EqualMatcher{Expected: matcher.Expected}
- }
-
- return matcher.expectedMatcher.Match(matcher.extractedField)
+ return matcher.expectedMatcher().Match(extractedField)
}
-func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveFieldMatcher) FailureMessage(actual any) (message string) {
+ extractedField, err := extractField(actual, matcher.Field, "HaveField")
+ if err != nil {
+ // this really shouldn't happen
+ return fmt.Sprintf("Failed to extract field '%s': %s", matcher.Field, err)
+ }
message = fmt.Sprintf("Value for field '%s' failed to satisfy matcher.\n", matcher.Field)
- message += matcher.expectedMatcher.FailureMessage(matcher.extractedField)
+ message += matcher.expectedMatcher().FailureMessage(extractedField)
return message
}
-func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual any) (message string) {
+ extractedField, err := extractField(actual, matcher.Field, "HaveField")
+ if err != nil {
+ // this really shouldn't happen
+ return fmt.Sprintf("Failed to extract field '%s': %s", matcher.Field, err)
+ }
message = fmt.Sprintf("Value for field '%s' satisfied matcher, but should not have.\n", matcher.Field)
- message += matcher.expectedMatcher.NegatedFailureMessage(matcher.extractedField)
+ message += matcher.expectedMatcher().NegatedFailureMessage(extractedField)
return message
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
index d14d9e5fc6..2d561b9a22 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
@@ -11,12 +11,12 @@ import (
)
type HaveHTTPBodyMatcher struct {
- Expected interface{}
- cachedResponse interface{}
+ Expected any
+ cachedResponse any
cachedBody []byte
}
-func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) {
+func (matcher *HaveHTTPBodyMatcher) Match(actual any) (bool, error) {
body, err := matcher.body(actual)
if err != nil {
return false, err
@@ -34,7 +34,7 @@ func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) {
}
}
-func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual any) (message string) {
body, err := matcher.body(actual)
if err != nil {
return fmt.Sprintf("failed to read body: %s", err)
@@ -52,7 +52,7 @@ func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message
}
}
-func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual any) (message string) {
body, err := matcher.body(actual)
if err != nil {
return fmt.Sprintf("failed to read body: %s", err)
@@ -73,7 +73,7 @@ func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (m
// body returns the body. It is cached because once we read it in Match()
// the Reader is closed and it is not readable again in FailureMessage()
// or NegatedFailureMessage()
-func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) {
+func (matcher *HaveHTTPBodyMatcher) body(actual any) ([]byte, error) {
if matcher.cachedResponse == actual && matcher.cachedBody != nil {
return matcher.cachedBody, nil
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
index c256f452e8..756722659b 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
@@ -11,10 +11,10 @@ import (
type HaveHTTPHeaderWithValueMatcher struct {
Header string
- Value interface{}
+ Value any
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual any) (success bool, err error) {
headerValue, err := matcher.extractHeader(actual)
if err != nil {
return false, err
@@ -28,7 +28,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (succes
return headerMatcher.Match(headerValue)
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{}) string {
+func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual any) string {
headerValue, err := matcher.extractHeader(actual)
if err != nil {
panic(err) // protected by Match()
@@ -43,7 +43,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{}
return fmt.Sprintf("HTTP header %q:\n%s", matcher.Header, diff)
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual any) (message string) {
headerValue, err := matcher.extractHeader(actual)
if err != nil {
panic(err) // protected by Match()
@@ -69,7 +69,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) getSubMatcher() (types.GomegaMatc
}
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual interface{}) (string, error) {
+func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual any) (string, error) {
switch r := actual.(type) {
case *http.Response:
return r.Header.Get(matcher.Header), nil
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
index 0f66e46ece..8b25b3a9f9 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
@@ -12,10 +12,10 @@ import (
)
type HaveHTTPStatusMatcher struct {
- Expected []interface{}
+ Expected []any
}
-func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveHTTPStatusMatcher) Match(actual any) (success bool, err error) {
var resp *http.Response
switch a := actual.(type) {
case *http.Response:
@@ -48,11 +48,11 @@ func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, e
return false, nil
}
-func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "to have HTTP status", matcher.expectedString())
}
-func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "not to have HTTP status", matcher.expectedString())
}
@@ -64,7 +64,7 @@ func (matcher *HaveHTTPStatusMatcher) expectedString() string {
return strings.Join(lines, "\n")
}
-func formatHttpResponse(input interface{}) string {
+func formatHttpResponse(input any) string {
var resp *http.Response
switch r := input.(type) {
case *http.Response:
diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
index 00cffec70e..9e16dcf5d6 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
@@ -7,15 +7,16 @@ import (
"reflect"
"github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/matchers/internal/miter"
)
type HaveKeyMatcher struct {
- Key interface{}
+ Key any
}
-func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) {
- if !isMap(actual) {
- return false, fmt.Errorf("HaveKey matcher expects a map. Got:%s", format.Object(actual, 1))
+func (matcher *HaveKeyMatcher) Match(actual any) (success bool, err error) {
+ if !isMap(actual) && !miter.IsSeq2(actual) {
+ return false, fmt.Errorf("HaveKey matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1))
}
keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher)
@@ -23,6 +24,20 @@ func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err erro
keyMatcher = &EqualMatcher{Expected: matcher.Key}
}
+ if miter.IsSeq2(actual) {
+ var success bool
+ var err error
+ miter.IterateKV(actual, func(k, v reflect.Value) bool {
+ success, err = keyMatcher.Match(k.Interface())
+ if err != nil {
+ err = fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error())
+ return false
+ }
+ return !success
+ })
+ return success, err
+ }
+
keys := reflect.ValueOf(actual).MapKeys()
for i := 0; i < len(keys); i++ {
success, err := keyMatcher.Match(keys[i].Interface())
@@ -37,7 +52,7 @@ func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err erro
return false, nil
}
-func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyMatcher) FailureMessage(actual any) (message string) {
switch matcher.Key.(type) {
case omegaMatcher:
return format.Message(actual, "to have key matching", matcher.Key)
@@ -46,7 +61,7 @@ func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message strin
}
}
-func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual any) (message string) {
switch matcher.Key.(type) {
case omegaMatcher:
return format.Message(actual, "not to have key matching", matcher.Key)
diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
index 4c59168047..1c53f1e56a 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
@@ -7,16 +7,17 @@ import (
"reflect"
"github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/matchers/internal/miter"
)
type HaveKeyWithValueMatcher struct {
- Key interface{}
- Value interface{}
+ Key any
+ Value any
}
-func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) {
- if !isMap(actual) {
- return false, fmt.Errorf("HaveKeyWithValue matcher expects a map. Got:%s", format.Object(actual, 1))
+func (matcher *HaveKeyWithValueMatcher) Match(actual any) (success bool, err error) {
+ if !isMap(actual) && !miter.IsSeq2(actual) {
+ return false, fmt.Errorf("HaveKeyWithValue matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1))
}
keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher)
@@ -29,6 +30,27 @@ func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool,
valueMatcher = &EqualMatcher{Expected: matcher.Value}
}
+ if miter.IsSeq2(actual) {
+ var success bool
+ var err error
+ miter.IterateKV(actual, func(k, v reflect.Value) bool {
+ success, err = keyMatcher.Match(k.Interface())
+ if err != nil {
+ err = fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error())
+ return false
+ }
+ if success {
+ success, err = valueMatcher.Match(v.Interface())
+ if err != nil {
+ err = fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error())
+ return false
+ }
+ }
+ return !success
+ })
+ return success, err
+ }
+
keys := reflect.ValueOf(actual).MapKeys()
for i := 0; i < len(keys); i++ {
success, err := keyMatcher.Match(keys[i].Interface())
@@ -48,7 +70,7 @@ func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool,
return false, nil
}
-func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual any) (message string) {
str := "to have {key: value}"
if _, ok := matcher.Key.(omegaMatcher); ok {
str += " matching"
@@ -56,12 +78,12 @@ func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (mess
str += " matching"
}
- expect := make(map[interface{}]interface{}, 1)
+ expect := make(map[any]any, 1)
expect[matcher.Key] = matcher.Value
return format.Message(actual, str, expect)
}
-func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual any) (message string) {
kStr := "not to have key"
if _, ok := matcher.Key.(omegaMatcher); ok {
kStr = "not to have key matching"
diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
index ee4276189d..c334d4c0aa 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
@@ -10,19 +10,19 @@ type HaveLenMatcher struct {
Count int
}
-func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveLenMatcher) Match(actual any) (success bool, err error) {
length, ok := lengthOf(actual)
if !ok {
- return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1))
+ return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice/iterator. Got:\n%s", format.Object(actual, 1))
}
return length == matcher.Count, nil
}
-func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveLenMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count)
}
-func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveLenMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
index 22a1b67306..a240f1a1c7 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
@@ -11,7 +11,7 @@ import (
type HaveOccurredMatcher struct {
}
-func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveOccurredMatcher) Match(actual any) (success bool, err error) {
// is purely nil?
if actual == nil {
return false, nil
@@ -26,10 +26,10 @@ func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err
return !isNil(actual), nil
}
-func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveOccurredMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected an error to have occurred. Got:\n%s", format.Object(actual, 1))
}
-func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Unexpected error:\n%s\n%s", format.Object(actual, 1), "occurred")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
index 1d8e80270b..7987d41f7b 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
@@ -8,10 +8,10 @@ import (
type HavePrefixMatcher struct {
Prefix string
- Args []interface{}
+ Args []any
}
-func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HavePrefixMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("HavePrefix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
@@ -27,10 +27,10 @@ func (matcher *HavePrefixMatcher) prefix() string {
return matcher.Prefix
}
-func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HavePrefixMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to have prefix", matcher.prefix())
}
-func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to have prefix", matcher.prefix())
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
index 40a3526eb2..2aa4ceacbc 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
@@ -8,10 +8,10 @@ import (
type HaveSuffixMatcher struct {
Suffix string
- Args []interface{}
+ Args []any
}
-func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveSuffixMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
@@ -27,10 +27,10 @@ func (matcher *HaveSuffixMatcher) suffix() string {
return matcher.Suffix
}
-func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveSuffixMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to have suffix", matcher.suffix())
}
-func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to have suffix", matcher.suffix())
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_value.go b/vendor/github.com/onsi/gomega/matchers/have_value.go
index f672528357..4c39e0db00 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_value.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_value.go
@@ -12,10 +12,10 @@ const maxIndirections = 31
type HaveValueMatcher struct {
Matcher types.GomegaMatcher // the matcher to apply to the "resolved" actual value.
- resolvedActual interface{} // the ("resolved") value.
+ resolvedActual any // the ("resolved") value.
}
-func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) {
+func (m *HaveValueMatcher) Match(actual any) (bool, error) {
val := reflect.ValueOf(actual)
for allowedIndirs := maxIndirections; allowedIndirs > 0; allowedIndirs-- {
// return an error if value isn't valid. Please note that we cannot
@@ -45,10 +45,10 @@ func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) {
return false, errors.New(format.Message(actual, "too many indirections"))
}
-func (m *HaveValueMatcher) FailureMessage(_ interface{}) (message string) {
+func (m *HaveValueMatcher) FailureMessage(_ any) (message string) {
return m.Matcher.FailureMessage(m.resolvedActual)
}
-func (m *HaveValueMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+func (m *HaveValueMatcher) NegatedFailureMessage(_ any) (message string) {
return m.Matcher.NegatedFailureMessage(m.resolvedActual)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_iter.go b/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_iter.go
new file mode 100644
index 0000000000..d8837a4d09
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_iter.go
@@ -0,0 +1,128 @@
+//go:build go1.23
+
+package miter
+
+import (
+ "reflect"
+)
+
+// HasIterators always returns false for Go versions before 1.23.
+func HasIterators() bool { return true }
+
+// IsIter returns true if the specified value is a function type that can be
+// range-d over, otherwise false.
+//
+// We don't use reflect's CanSeq and CanSeq2 directly, as these would return
+// true also for other value types that are range-able, such as integers,
+// slices, et cetera. Here, we aim only at range-able (iterator) functions.
+func IsIter(it any) bool {
+ if it == nil { // on purpose we only test for untyped nil.
+ return false
+ }
+ // reject all non-iterator-func values, even if they're range-able.
+ t := reflect.TypeOf(it)
+ if t.Kind() != reflect.Func {
+ return false
+ }
+ return t.CanSeq() || t.CanSeq2()
+}
+
+// IterKVTypes returns the reflection types of an iterator's yield function's K
+// and optional V arguments, otherwise nil K and V reflection types.
+func IterKVTypes(it any) (k, v reflect.Type) {
+ if it == nil {
+ return
+ }
+ // reject all non-iterator-func values, even if they're range-able.
+ t := reflect.TypeOf(it)
+ if t.Kind() != reflect.Func {
+ return
+ }
+ // get the reflection types for V, and where applicable, K.
+ switch {
+ case t.CanSeq():
+ v = t. /*iterator fn*/ In(0). /*yield fn*/ In(0)
+ case t.CanSeq2():
+ yieldfn := t. /*iterator fn*/ In(0)
+ k = yieldfn.In(0)
+ v = yieldfn.In(1)
+ }
+ return
+}
+
+// IsSeq2 returns true if the passed iterator function is compatible with
+// iter.Seq2, otherwise false.
+//
+// IsSeq2 hides the Go 1.23+ specific reflect.Type.CanSeq2 behind a facade which
+// is empty for Go versions before 1.23.
+func IsSeq2(it any) bool {
+ if it == nil {
+ return false
+ }
+ t := reflect.TypeOf(it)
+ return t.Kind() == reflect.Func && t.CanSeq2()
+}
+
+// isNilly returns true if v is either an untyped nil, or is a nil function (not
+// necessarily an iterator function).
+func isNilly(v any) bool {
+ if v == nil {
+ return true
+ }
+ rv := reflect.ValueOf(v)
+ return rv.Kind() == reflect.Func && rv.IsNil()
+}
+
+// IterateV loops over the elements produced by an iterator function, passing
+// the elements to the specified yield function individually and stopping only
+// when either the iterator function runs out of elements or the yield function
+// tell us to stop it.
+//
+// IterateV works very much like reflect.Value.Seq but hides the Go 1.23+
+// specific parts behind a facade which is empty for Go versions before 1.23, in
+// order to simplify code maintenance for matchers when using older Go versions.
+func IterateV(it any, yield func(v reflect.Value) bool) {
+ if isNilly(it) {
+ return
+ }
+ // reject all non-iterator-func values, even if they're range-able.
+ t := reflect.TypeOf(it)
+ if t.Kind() != reflect.Func || !t.CanSeq() {
+ return
+ }
+ // Call the specified iterator function, handing it our adaptor to call the
+ // specified generic reflection yield function.
+ reflectedYield := reflect.MakeFunc(
+ t. /*iterator fn*/ In(0),
+ func(args []reflect.Value) []reflect.Value {
+ return []reflect.Value{reflect.ValueOf(yield(args[0]))}
+ })
+ reflect.ValueOf(it).Call([]reflect.Value{reflectedYield})
+}
+
+// IterateKV loops over the key-value elements produced by an iterator function,
+// passing the elements to the specified yield function individually and
+// stopping only when either the iterator function runs out of elements or the
+// yield function tell us to stop it.
+//
+// IterateKV works very much like reflect.Value.Seq2 but hides the Go 1.23+
+// specific parts behind a facade which is empty for Go versions before 1.23, in
+// order to simplify code maintenance for matchers when using older Go versions.
+func IterateKV(it any, yield func(k, v reflect.Value) bool) {
+ if isNilly(it) {
+ return
+ }
+ // reject all non-iterator-func values, even if they're range-able.
+ t := reflect.TypeOf(it)
+ if t.Kind() != reflect.Func || !t.CanSeq2() {
+ return
+ }
+ // Call the specified iterator function, handing it our adaptor to call the
+ // specified generic reflection yield function.
+ reflectedYield := reflect.MakeFunc(
+ t. /*iterator fn*/ In(0),
+ func(args []reflect.Value) []reflect.Value {
+ return []reflect.Value{reflect.ValueOf(yield(args[0], args[1]))}
+ })
+ reflect.ValueOf(it).Call([]reflect.Value{reflectedYield})
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_noiter.go b/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_noiter.go
new file mode 100644
index 0000000000..4b8fcc55bd
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_noiter.go
@@ -0,0 +1,44 @@
+//go:build !go1.23
+
+/*
+Gomega matchers
+
+This package implements the Gomega matchers and does not typically need to be imported.
+See the docs for Gomega for documentation on the matchers
+
+http://onsi.github.io/gomega/
+*/
+
+package miter
+
+import "reflect"
+
+// HasIterators always returns false for Go versions before 1.23.
+func HasIterators() bool { return false }
+
+// IsIter always returns false for Go versions before 1.23 as there is no
+// iterator (function) pattern defined yet; see also:
+// https://tip.golang.org/blog/range-functions.
+func IsIter(i any) bool { return false }
+
+// IsSeq2 always returns false for Go versions before 1.23 as there is no
+// iterator (function) pattern defined yet; see also:
+// https://tip.golang.org/blog/range-functions.
+func IsSeq2(it any) bool { return false }
+
+// IterKVTypes always returns nil reflection types for Go versions before 1.23
+// as there is no iterator (function) pattern defined yet; see also:
+// https://tip.golang.org/blog/range-functions.
+func IterKVTypes(i any) (k, v reflect.Type) {
+ return
+}
+
+// IterateV never loops over what has been passed to it as an iterator for Go
+// versions before 1.23 as there is no iterator (function) pattern defined yet;
+// see also: https://tip.golang.org/blog/range-functions.
+func IterateV(it any, yield func(v reflect.Value) bool) {}
+
+// IterateKV never loops over what has been passed to it as an iterator for Go
+// versions before 1.23 as there is no iterator (function) pattern defined yet;
+// see also: https://tip.golang.org/blog/range-functions.
+func IterateKV(it any, yield func(k, v reflect.Value) bool) {}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
index c539dd389c..f9d313772f 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
@@ -71,14 +71,14 @@ func (matcher *MatchErrorMatcher) Match(actual any) (success bool, err error) {
format.Object(expected, 1))
}
-func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchErrorMatcher) FailureMessage(actual any) (message string) {
if matcher.isFunc {
return format.Message(actual, fmt.Sprintf("to match error function %s", matcher.FuncErrDescription[0]))
}
return format.Message(actual, "to match error", matcher.Expected)
}
-func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual any) (message string) {
if matcher.isFunc {
return format.Message(actual, fmt.Sprintf("not to match error function %s", matcher.FuncErrDescription[0]))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
index f962f139ff..331f289abc 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
@@ -9,18 +9,18 @@ import (
)
type MatchJSONMatcher struct {
- JSONToMatch interface{}
- firstFailurePath []interface{}
+ JSONToMatch any
+ firstFailurePath []any
}
-func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchJSONMatcher) Match(actual any) (success bool, err error) {
actualString, expectedString, err := matcher.prettyPrint(actual)
if err != nil {
return false, err
}
- var aval interface{}
- var eval interface{}
+ var aval any
+ var eval any
// this is guarded by prettyPrint
json.Unmarshal([]byte(actualString), &aval)
@@ -30,17 +30,17 @@ func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err er
return equal, nil
}
-func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchJSONMatcher) FailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.prettyPrint(actual)
return formattedMessage(format.Message(actualString, "to match JSON of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.prettyPrint(actual)
return formattedMessage(format.Message(actualString, "not to match JSON of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+func (matcher *MatchJSONMatcher) prettyPrint(actual any) (actualFormatted, expectedFormatted string, err error) {
actualString, ok := toString(actual)
if !ok {
return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1))
diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
index adac5db6b8..779be683e0 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
@@ -9,10 +9,10 @@ import (
type MatchRegexpMatcher struct {
Regexp string
- Args []interface{}
+ Args []any
}
-func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchRegexpMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1))
@@ -26,11 +26,11 @@ func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err
return match, nil
}
-func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchRegexpMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to match regular expression", matcher.regexp())
}
-func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to match regular expression", matcher.regexp())
}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
index 5c815f5af7..f7dcaf6fdc 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
@@ -15,10 +15,10 @@ import (
)
type MatchXMLMatcher struct {
- XMLToMatch interface{}
+ XMLToMatch any
}
-func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchXMLMatcher) Match(actual any) (success bool, err error) {
actualString, expectedString, err := matcher.formattedPrint(actual)
if err != nil {
return false, err
@@ -37,17 +37,17 @@ func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err err
return reflect.DeepEqual(aval, eval), nil
}
-func (matcher *MatchXMLMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchXMLMatcher) FailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.formattedPrint(actual)
return fmt.Sprintf("Expected\n%s\nto match XML of\n%s", actualString, expectedString)
}
-func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.formattedPrint(actual)
return fmt.Sprintf("Expected\n%s\nnot to match XML of\n%s", actualString, expectedString)
}
-func (matcher *MatchXMLMatcher) formattedPrint(actual interface{}) (actualString, expectedString string, err error) {
+func (matcher *MatchXMLMatcher) formattedPrint(actual any) (actualString, expectedString string, err error) {
var ok bool
actualString, ok = toString(actual)
if !ok {
diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
index 2cb6b47db9..c3da9bd48b 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
@@ -5,22 +5,22 @@ import (
"strings"
"github.com/onsi/gomega/format"
- "gopkg.in/yaml.v3"
+ "go.yaml.in/yaml/v3"
)
type MatchYAMLMatcher struct {
- YAMLToMatch interface{}
- firstFailurePath []interface{}
+ YAMLToMatch any
+ firstFailurePath []any
}
-func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchYAMLMatcher) Match(actual any) (success bool, err error) {
actualString, expectedString, err := matcher.toStrings(actual)
if err != nil {
return false, err
}
- var aval interface{}
- var eval interface{}
+ var aval any
+ var eval any
if err := yaml.Unmarshal([]byte(actualString), &aval); err != nil {
return false, fmt.Errorf("Actual '%s' should be valid YAML, but it is not.\nUnderlying error:%s", actualString, err)
@@ -34,23 +34,23 @@ func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err er
return equal, nil
}
-func (matcher *MatchYAMLMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchYAMLMatcher) FailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.toNormalisedStrings(actual)
return formattedMessage(format.Message(actualString, "to match YAML of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.toNormalisedStrings(actual)
return formattedMessage(format.Message(actualString, "not to match YAML of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual any) (actualFormatted, expectedFormatted string, err error) {
actualString, expectedString, err := matcher.toStrings(actual)
return normalise(actualString), normalise(expectedString), err
}
func normalise(input string) string {
- var val interface{}
+ var val any
err := yaml.Unmarshal([]byte(input), &val)
if err != nil {
panic(err) // unreachable since Match already calls Unmarshal
@@ -62,7 +62,7 @@ func normalise(input string) string {
return strings.TrimSpace(string(output))
}
-func (matcher *MatchYAMLMatcher) toStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+func (matcher *MatchYAMLMatcher) toStrings(actual any) (actualFormatted, expectedFormatted string, err error) {
actualString, ok := toString(actual)
if !ok {
return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1))
diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go
index 78b71910d1..c598b7899a 100644
--- a/vendor/github.com/onsi/gomega/matchers/not.go
+++ b/vendor/github.com/onsi/gomega/matchers/not.go
@@ -8,7 +8,7 @@ type NotMatcher struct {
Matcher types.GomegaMatcher
}
-func (m *NotMatcher) Match(actual interface{}) (bool, error) {
+func (m *NotMatcher) Match(actual any) (bool, error) {
success, err := m.Matcher.Match(actual)
if err != nil {
return false, err
@@ -16,14 +16,14 @@ func (m *NotMatcher) Match(actual interface{}) (bool, error) {
return !success, nil
}
-func (m *NotMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *NotMatcher) FailureMessage(actual any) (message string) {
return m.Matcher.NegatedFailureMessage(actual) // works beautifully
}
-func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *NotMatcher) NegatedFailureMessage(actual any) (message string) {
return m.Matcher.FailureMessage(actual) // works beautifully
}
-func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (m *NotMatcher) MatchMayChangeInTheFuture(actual any) bool {
return types.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value
}
diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go
index 841ae26ab0..6578404b0e 100644
--- a/vendor/github.com/onsi/gomega/matchers/or.go
+++ b/vendor/github.com/onsi/gomega/matchers/or.go
@@ -14,7 +14,7 @@ type OrMatcher struct {
firstSuccessfulMatcher types.GomegaMatcher
}
-func (m *OrMatcher) Match(actual interface{}) (success bool, err error) {
+func (m *OrMatcher) Match(actual any) (success bool, err error) {
m.firstSuccessfulMatcher = nil
for _, matcher := range m.Matchers {
success, err := matcher.Match(actual)
@@ -29,16 +29,16 @@ func (m *OrMatcher) Match(actual interface{}) (success bool, err error) {
return false, nil
}
-func (m *OrMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *OrMatcher) FailureMessage(actual any) (message string) {
// not the most beautiful list of matchers, but not bad either...
return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers))
}
-func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *OrMatcher) NegatedFailureMessage(actual any) (message string) {
return m.firstSuccessfulMatcher.NegatedFailureMessage(actual)
}
-func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (m *OrMatcher) MatchMayChangeInTheFuture(actual any) bool {
/*
Example with 3 matchers: A, B, C
diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
index adc8cee630..8be5a7ccf3 100644
--- a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
@@ -8,11 +8,11 @@ import (
)
type PanicMatcher struct {
- Expected interface{}
- object interface{}
+ Expected any
+ object any
}
-func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *PanicMatcher) Match(actual any) (success bool, err error) {
if actual == nil {
return false, fmt.Errorf("PanicMatcher expects a non-nil actual.")
}
@@ -52,7 +52,7 @@ func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error)
return
}
-func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *PanicMatcher) FailureMessage(actual any) (message string) {
if matcher.Expected == nil {
// We wanted any panic to occur, but none did.
return format.Message(actual, "to panic")
@@ -91,7 +91,7 @@ func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string)
}
}
-func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *PanicMatcher) NegatedFailureMessage(actual any) (message string) {
if matcher.Expected == nil {
// We didn't want any panic to occur, but one did.
return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1)))
diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
index 1936a2ba52..1d9f61d636 100644
--- a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
@@ -3,6 +3,7 @@
package matchers
import (
+ "errors"
"fmt"
"reflect"
@@ -10,12 +11,12 @@ import (
)
type ReceiveMatcher struct {
- Arg interface{}
+ Args []any
receivedValue reflect.Value
channelClosed bool
}
-func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *ReceiveMatcher) Match(actual any) (success bool, err error) {
if !isChan(actual) {
return false, fmt.Errorf("ReceiveMatcher expects a channel. Got:\n%s", format.Object(actual, 1))
}
@@ -29,15 +30,38 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
var subMatcher omegaMatcher
var hasSubMatcher bool
-
- if matcher.Arg != nil {
- subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher)
+ var resultReference any
+
+ // Valid arg formats are as follows, always with optional POINTER before
+ // optional MATCHER:
+ // - Receive()
+ // - Receive(POINTER)
+ // - Receive(MATCHER)
+ // - Receive(POINTER, MATCHER)
+ args := matcher.Args
+ if len(args) > 0 {
+ arg := args[0]
+ _, isSubMatcher := arg.(omegaMatcher)
+ if !isSubMatcher && reflect.ValueOf(arg).Kind() == reflect.Ptr {
+ // Consume optional POINTER arg first, if it ain't no matcher ;)
+ resultReference = arg
+ args = args[1:]
+ }
+ }
+ if len(args) > 0 {
+ arg := args[0]
+ subMatcher, hasSubMatcher = arg.(omegaMatcher)
if !hasSubMatcher {
- argType := reflect.TypeOf(matcher.Arg)
- if argType.Kind() != reflect.Ptr {
- return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1))
- }
+ // At this point we assume the dev user wanted to assign a received
+ // value, so [POINTER,]MATCHER.
+ return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(arg, 1))
}
+ // Consume optional MATCHER arg.
+ args = args[1:]
+ }
+ if len(args) > 0 {
+ // If there are still args present, reject all.
+ return false, errors.New("Receive matcher expects at most an optional pointer and/or an optional matcher")
}
winnerIndex, value, open := reflect.Select([]reflect.SelectCase{
@@ -58,16 +82,20 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
}
if hasSubMatcher {
- if didReceive {
- matcher.receivedValue = value
- return subMatcher.Match(matcher.receivedValue.Interface())
+ if !didReceive {
+ return false, nil
}
- return false, nil
+ matcher.receivedValue = value
+ if match, err := subMatcher.Match(matcher.receivedValue.Interface()); err != nil || !match {
+ return match, err
+ }
+ // if we received a match, then fall through in order to handle an
+ // optional assignment of the received value to the specified reference.
}
if didReceive {
- if matcher.Arg != nil {
- outValue := reflect.ValueOf(matcher.Arg)
+ if resultReference != nil {
+ outValue := reflect.ValueOf(resultReference)
if value.Type().AssignableTo(outValue.Elem().Type()) {
outValue.Elem().Set(value)
@@ -77,7 +105,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
outValue.Elem().Set(value.Elem())
return true, nil
} else {
- return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(matcher.Arg, 1))
+ return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(resultReference, 1))
}
}
@@ -87,8 +115,12 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
return false, nil
}
-func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) {
- subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher)
+func (matcher *ReceiveMatcher) FailureMessage(actual any) (message string) {
+ var matcherArg any
+ if len(matcher.Args) > 0 {
+ matcherArg = matcher.Args[len(matcher.Args)-1]
+ }
+ subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher)
closedAddendum := ""
if matcher.channelClosed {
@@ -104,8 +136,12 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin
return format.Message(actual, "to receive something."+closedAddendum)
}
-func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher)
+func (matcher *ReceiveMatcher) NegatedFailureMessage(actual any) (message string) {
+ var matcherArg any
+ if len(matcher.Args) > 0 {
+ matcherArg = matcher.Args[len(matcher.Args)-1]
+ }
+ subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher)
closedAddendum := ""
if matcher.channelClosed {
@@ -121,7 +157,7 @@ func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (messag
return format.Message(actual, "not to receive anything."+closedAddendum)
}
-func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual any) bool {
if !isChan(actual) {
return false
}
diff --git a/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go b/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
index ec68fe8b62..2adc4825aa 100644
--- a/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
@@ -8,13 +8,13 @@ import (
)
type SatisfyMatcher struct {
- Predicate interface{}
+ Predicate any
// cached type
predicateArgType reflect.Type
}
-func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher {
+func NewSatisfyMatcher(predicate any) *SatisfyMatcher {
if predicate == nil {
panic("predicate cannot be nil")
}
@@ -35,7 +35,7 @@ func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher {
}
}
-func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) {
+func (m *SatisfyMatcher) Match(actual any) (success bool, err error) {
// prepare a parameter to pass to the predicate
var param reflect.Value
if actual != nil && reflect.TypeOf(actual).AssignableTo(m.predicateArgType) {
@@ -57,10 +57,10 @@ func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) {
return result[0].Bool(), nil
}
-func (m *SatisfyMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *SatisfyMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to satisfy predicate", m.Predicate)
}
-func (m *SatisfyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *SatisfyMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "to not satisfy predicate", m.Predicate)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
index 1369c1e87f..30dd58f4a5 100644
--- a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
+++ b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
@@ -8,7 +8,7 @@ import (
"strings"
)
-func formattedMessage(comparisonMessage string, failurePath []interface{}) string {
+func formattedMessage(comparisonMessage string, failurePath []any) string {
var diffMessage string
if len(failurePath) == 0 {
diffMessage = ""
@@ -18,7 +18,7 @@ func formattedMessage(comparisonMessage string, failurePath []interface{}) strin
return fmt.Sprintf("%s%s", comparisonMessage, diffMessage)
}
-func formattedFailurePath(failurePath []interface{}) string {
+func formattedFailurePath(failurePath []any) string {
formattedPaths := []string{}
for i := len(failurePath) - 1; i >= 0; i-- {
switch p := failurePath[i].(type) {
@@ -34,33 +34,33 @@ func formattedFailurePath(failurePath []interface{}) string {
return strings.Join(formattedPaths, "")
}
-func deepEqual(a interface{}, b interface{}) (bool, []interface{}) {
- var errorPath []interface{}
+func deepEqual(a any, b any) (bool, []any) {
+ var errorPath []any
if reflect.TypeOf(a) != reflect.TypeOf(b) {
return false, errorPath
}
switch a.(type) {
- case []interface{}:
- if len(a.([]interface{})) != len(b.([]interface{})) {
+ case []any:
+ if len(a.([]any)) != len(b.([]any)) {
return false, errorPath
}
- for i, v := range a.([]interface{}) {
- elementEqual, keyPath := deepEqual(v, b.([]interface{})[i])
+ for i, v := range a.([]any) {
+ elementEqual, keyPath := deepEqual(v, b.([]any)[i])
if !elementEqual {
return false, append(keyPath, i)
}
}
return true, errorPath
- case map[interface{}]interface{}:
- if len(a.(map[interface{}]interface{})) != len(b.(map[interface{}]interface{})) {
+ case map[any]any:
+ if len(a.(map[any]any)) != len(b.(map[any]any)) {
return false, errorPath
}
- for k, v1 := range a.(map[interface{}]interface{}) {
- v2, ok := b.(map[interface{}]interface{})[k]
+ for k, v1 := range a.(map[any]any) {
+ v2, ok := b.(map[any]any)[k]
if !ok {
return false, errorPath
}
@@ -71,13 +71,13 @@ func deepEqual(a interface{}, b interface{}) (bool, []interface{}) {
}
return true, errorPath
- case map[string]interface{}:
- if len(a.(map[string]interface{})) != len(b.(map[string]interface{})) {
+ case map[string]any:
+ if len(a.(map[string]any)) != len(b.(map[string]any)) {
return false, errorPath
}
- for k, v1 := range a.(map[string]interface{}) {
- v2, ok := b.(map[string]interface{})[k]
+ for k, v1 := range a.(map[string]any) {
+ v2, ok := b.(map[string]any)[k]
if !ok {
return false, errorPath
}
diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
index 327350f7b7..f0b2c4aa66 100644
--- a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
@@ -14,7 +14,7 @@ type formattedGomegaError interface {
type SucceedMatcher struct {
}
-func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *SucceedMatcher) Match(actual any) (success bool, err error) {
// is purely nil?
if actual == nil {
return true, nil
@@ -29,7 +29,7 @@ func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err erro
return isNil(actual), nil
}
-func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *SucceedMatcher) FailureMessage(actual any) (message string) {
var fgErr formattedGomegaError
if errors.As(actual.(error), &fgErr) {
return fgErr.FormattedGomegaError()
@@ -37,6 +37,6 @@ func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message strin
return fmt.Sprintf("Expected success, but got an error:\n%s", format.Object(actual, 1))
}
-func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *SucceedMatcher) NegatedFailureMessage(actual any) (message string) {
return "Expected failure, but got no error."
}
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
index 830e308274..0d78779d47 100644
--- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
@@ -11,7 +11,7 @@ type BipartiteGraph struct {
Edges EdgeSet
}
-func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) {
+func NewBipartiteGraph(leftValues, rightValues []any, neighbours func(any, any) (bool, error)) (*BipartiteGraph, error) {
left := NodeOrderedSet{}
for i, v := range leftValues {
left = append(left, Node{ID: i, Value: v})
@@ -41,7 +41,7 @@ func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(in
// FreeLeftRight returns left node values and right node values
// of the BipartiteGraph's nodes which are not part of the given edges.
-func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []interface{}) {
+func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []any) {
for _, node := range bg.Left {
if edges.Free(node) {
leftValues = append(leftValues, node.Value)
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go
index 1c54edd8f1..44aa61d4b3 100644
--- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go
@@ -1,6 +1,8 @@
package bipartitegraph
import (
+ "slices"
+
. "github.com/onsi/gomega/matchers/support/goraph/edge"
. "github.com/onsi/gomega/matchers/support/goraph/node"
"github.com/onsi/gomega/matchers/support/goraph/util"
@@ -157,6 +159,11 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [
if len(currentLayer) == 0 {
return []NodeOrderedSet{}
}
+ if done { // if last layer - into last layer must be only 'free' nodes
+ currentLayer = slices.DeleteFunc(currentLayer, func(in Node) bool {
+ return !matching.Free(in)
+ })
+ }
guideLayers = append(guideLayers, currentLayer)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
index cd597a2f22..66d3578d51 100644
--- a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
@@ -2,7 +2,7 @@ package node
type Node struct {
ID int
- Value interface{}
+ Value any
}
type NodeOrderedSet []Node
diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go
index dced2419ea..d020dedc30 100644
--- a/vendor/github.com/onsi/gomega/matchers/type_support.go
+++ b/vendor/github.com/onsi/gomega/matchers/type_support.go
@@ -15,19 +15,21 @@ import (
"encoding/json"
"fmt"
"reflect"
+
+ "github.com/onsi/gomega/matchers/internal/miter"
)
type omegaMatcher interface {
- Match(actual interface{}) (success bool, err error)
- FailureMessage(actual interface{}) (message string)
- NegatedFailureMessage(actual interface{}) (message string)
+ Match(actual any) (success bool, err error)
+ FailureMessage(actual any) (message string)
+ NegatedFailureMessage(actual any) (message string)
}
-func isBool(a interface{}) bool {
+func isBool(a any) bool {
return reflect.TypeOf(a).Kind() == reflect.Bool
}
-func isNumber(a interface{}) bool {
+func isNumber(a any) bool {
if a == nil {
return false
}
@@ -35,22 +37,22 @@ func isNumber(a interface{}) bool {
return reflect.Int <= kind && kind <= reflect.Float64
}
-func isInteger(a interface{}) bool {
+func isInteger(a any) bool {
kind := reflect.TypeOf(a).Kind()
return reflect.Int <= kind && kind <= reflect.Int64
}
-func isUnsignedInteger(a interface{}) bool {
+func isUnsignedInteger(a any) bool {
kind := reflect.TypeOf(a).Kind()
return reflect.Uint <= kind && kind <= reflect.Uint64
}
-func isFloat(a interface{}) bool {
+func isFloat(a any) bool {
kind := reflect.TypeOf(a).Kind()
return reflect.Float32 <= kind && kind <= reflect.Float64
}
-func toInteger(a interface{}) int64 {
+func toInteger(a any) int64 {
if isInteger(a) {
return reflect.ValueOf(a).Int()
} else if isUnsignedInteger(a) {
@@ -61,7 +63,7 @@ func toInteger(a interface{}) int64 {
panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
}
-func toUnsignedInteger(a interface{}) uint64 {
+func toUnsignedInteger(a any) uint64 {
if isInteger(a) {
return uint64(reflect.ValueOf(a).Int())
} else if isUnsignedInteger(a) {
@@ -72,7 +74,7 @@ func toUnsignedInteger(a interface{}) uint64 {
panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
}
-func toFloat(a interface{}) float64 {
+func toFloat(a any) float64 {
if isInteger(a) {
return float64(reflect.ValueOf(a).Int())
} else if isUnsignedInteger(a) {
@@ -83,26 +85,26 @@ func toFloat(a interface{}) float64 {
panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
}
-func isError(a interface{}) bool {
+func isError(a any) bool {
_, ok := a.(error)
return ok
}
-func isChan(a interface{}) bool {
+func isChan(a any) bool {
if isNil(a) {
return false
}
return reflect.TypeOf(a).Kind() == reflect.Chan
}
-func isMap(a interface{}) bool {
+func isMap(a any) bool {
if a == nil {
return false
}
return reflect.TypeOf(a).Kind() == reflect.Map
}
-func isArrayOrSlice(a interface{}) bool {
+func isArrayOrSlice(a any) bool {
if a == nil {
return false
}
@@ -114,14 +116,14 @@ func isArrayOrSlice(a interface{}) bool {
}
}
-func isString(a interface{}) bool {
+func isString(a any) bool {
if a == nil {
return false
}
return reflect.TypeOf(a).Kind() == reflect.String
}
-func toString(a interface{}) (string, bool) {
+func toString(a any) (string, bool) {
aString, isString := a.(string)
if isString {
return aString, true
@@ -145,18 +147,29 @@ func toString(a interface{}) (string, bool) {
return "", false
}
-func lengthOf(a interface{}) (int, bool) {
+func lengthOf(a any) (int, bool) {
if a == nil {
return 0, false
}
switch reflect.TypeOf(a).Kind() {
case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice:
return reflect.ValueOf(a).Len(), true
+ case reflect.Func:
+ if !miter.IsIter(a) {
+ return 0, false
+ }
+ var l int
+ if miter.IsSeq2(a) {
+ miter.IterateKV(a, func(k, v reflect.Value) bool { l++; return true })
+ } else {
+ miter.IterateV(a, func(v reflect.Value) bool { l++; return true })
+ }
+ return l, true
default:
return 0, false
}
}
-func capOf(a interface{}) (int, bool) {
+func capOf(a any) (int, bool) {
if a == nil {
return 0, false
}
@@ -168,7 +181,7 @@ func capOf(a interface{}) (int, bool) {
}
}
-func isNil(a interface{}) bool {
+func isNil(a any) bool {
if a == nil {
return true
}
diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go
index 6f743b1b32..6231c3b476 100644
--- a/vendor/github.com/onsi/gomega/matchers/with_transform.go
+++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go
@@ -9,20 +9,20 @@ import (
type WithTransformMatcher struct {
// input
- Transform interface{} // must be a function of one parameter that returns one value and an optional error
+ Transform any // must be a function of one parameter that returns one value and an optional error
Matcher types.GomegaMatcher
// cached value
transformArgType reflect.Type
// state
- transformedValue interface{}
+ transformedValue any
}
// reflect.Type for error
var errorT = reflect.TypeOf((*error)(nil)).Elem()
-func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher {
+func NewWithTransformMatcher(transform any, matcher types.GomegaMatcher) *WithTransformMatcher {
if transform == nil {
panic("transform function cannot be nil")
}
@@ -43,7 +43,7 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher)
}
}
-func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
+func (m *WithTransformMatcher) Match(actual any) (bool, error) {
// prepare a parameter to pass to the Transform function
var param reflect.Value
if actual != nil && reflect.TypeOf(actual).AssignableTo(m.transformArgType) {
@@ -72,15 +72,15 @@ func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
return m.Matcher.Match(m.transformedValue)
}
-func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) {
+func (m *WithTransformMatcher) FailureMessage(_ any) (message string) {
return m.Matcher.FailureMessage(m.transformedValue)
}
-func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+func (m *WithTransformMatcher) NegatedFailureMessage(_ any) (message string) {
return m.Matcher.NegatedFailureMessage(m.transformedValue)
}
-func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool {
+func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ any) bool {
// TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.)
//
// Querying the next matcher is fine if the transformer always will return the same value.
diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go
index 7c7adb9415..685a46f373 100644
--- a/vendor/github.com/onsi/gomega/types/types.go
+++ b/vendor/github.com/onsi/gomega/types/types.go
@@ -10,34 +10,36 @@ type GomegaFailHandler func(message string, callerSkip ...int)
// A simple *testing.T interface wrapper
type GomegaTestingT interface {
Helper()
- Fatalf(format string, args ...interface{})
+ Fatalf(format string, args ...any)
}
-// Gomega represents an object that can perform synchronous and assynchronous assertions with Gomega matchers
+// Gomega represents an object that can perform synchronous and asynchronous assertions with Gomega matchers
type Gomega interface {
- Ω(actual interface{}, extra ...interface{}) Assertion
- Expect(actual interface{}, extra ...interface{}) Assertion
- ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion
+ Ω(actual any, extra ...any) Assertion
+ Expect(actual any, extra ...any) Assertion
+ ExpectWithOffset(offset int, actual any, extra ...any) Assertion
- Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
- EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+ Eventually(actualOrCtx any, args ...any) AsyncAssertion
+ EventuallyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion
- Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
- ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+ Consistently(actualOrCtx any, args ...any) AsyncAssertion
+ ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion
SetDefaultEventuallyTimeout(time.Duration)
SetDefaultEventuallyPollingInterval(time.Duration)
SetDefaultConsistentlyDuration(time.Duration)
SetDefaultConsistentlyPollingInterval(time.Duration)
+ EnforceDefaultTimeoutsWhenUsingContexts()
+ DisableDefaultTimeoutsWhenUsingContext()
}
// All Gomega matchers must implement the GomegaMatcher interface
//
// For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers
type GomegaMatcher interface {
- Match(actual interface{}) (success bool, err error)
- FailureMessage(actual interface{}) (message string)
- NegatedFailureMessage(actual interface{}) (message string)
+ Match(actual any) (success bool, err error)
+ FailureMessage(actual any) (message string)
+ NegatedFailureMessage(actual any) (message string)
}
/*
@@ -50,10 +52,10 @@ For example, a process' exit code can never change. So, gexec's Exit matcher re
for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore.
*/
type OracleMatcher interface {
- MatchMayChangeInTheFuture(actual interface{}) bool
+ MatchMayChangeInTheFuture(actual any) bool
}
-func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool {
+func MatchMayChangeInTheFuture(matcher GomegaMatcher, value any) bool {
oracleMatcher, ok := matcher.(OracleMatcher)
if !ok {
return true
@@ -65,8 +67,13 @@ func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool {
// AsyncAssertions are returned by Eventually and Consistently and enable matchers to be polled repeatedly to ensure
// they are eventually satisfied
type AsyncAssertion interface {
- Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ Should(matcher GomegaMatcher, optionalDescription ...any) bool
+ ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool
+
+ // equivalent to above
+ To(matcher GomegaMatcher, optionalDescription ...any) bool
+ ToNot(matcher GomegaMatcher, optionalDescription ...any) bool
+ NotTo(matcher GomegaMatcher, optionalDescription ...any) bool
WithOffset(offset int) AsyncAssertion
WithTimeout(interval time.Duration) AsyncAssertion
@@ -74,18 +81,18 @@ type AsyncAssertion interface {
Within(timeout time.Duration) AsyncAssertion
ProbeEvery(interval time.Duration) AsyncAssertion
WithContext(ctx context.Context) AsyncAssertion
- WithArguments(argsToForward ...interface{}) AsyncAssertion
+ WithArguments(argsToForward ...any) AsyncAssertion
MustPassRepeatedly(count int) AsyncAssertion
}
// Assertions are returned by Ω and Expect and enable assertions against Gomega matchers
type Assertion interface {
- Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ Should(matcher GomegaMatcher, optionalDescription ...any) bool
+ ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool
- To(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- ToNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- NotTo(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ To(matcher GomegaMatcher, optionalDescription ...any) bool
+ ToNot(matcher GomegaMatcher, optionalDescription ...any) bool
+ NotTo(matcher GomegaMatcher, optionalDescription ...any) bool
WithOffset(offset int) Assertion
diff --git a/vendor/github.com/x448/float16/.travis.yml b/vendor/github.com/x448/float16/.travis.yml
new file mode 100644
index 0000000000..8902bdaaff
--- /dev/null
+++ b/vendor/github.com/x448/float16/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+ - 1.11.x
+
+env:
+ - GO111MODULE=on
+
+script:
+ - go test -short -coverprofile=coverage.txt -covermode=count ./...
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/x448/float16/LICENSE b/vendor/github.com/x448/float16/LICENSE
new file mode 100644
index 0000000000..bf6e357854
--- /dev/null
+++ b/vendor/github.com/x448/float16/LICENSE
@@ -0,0 +1,22 @@
+MIT License
+
+Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/x448/float16/README.md b/vendor/github.com/x448/float16/README.md
new file mode 100644
index 0000000000..b524b8135d
--- /dev/null
+++ b/vendor/github.com/x448/float16/README.md
@@ -0,0 +1,133 @@
+# Float16 (Binary16) in Go/Golang
+[](https://travis-ci.org/x448/float16)
+[](https://codecov.io/gh/x448/float16)
+[](https://goreportcard.com/report/github.com/x448/float16)
+[](https://github.com/x448/float16/releases)
+[](https://raw.githubusercontent.com/x448/float16/master/LICENSE)
+
+`float16` package provides [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16.
+
+IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result.
+
+All possible 4+ billion floating-point conversions with this library are verified to be correct.
+
+Lowercase "float16" refers to IEEE 754 binary16. And capitalized "Float16" refers to exported Go data type provided by this library.
+
+## Features
+Current features include:
+
+* float16 to float32 conversions use lossless conversion.
+* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven".
+* conversions using pure Go take about 2.65 ns/op on a desktop amd64.
+* unit tests provide 100% code coverage and check all possible 4+ billion conversions.
+* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc.
+* all functions in this library use zero allocs except String().
+
+## Status
+This library is used by [fxamacker/cbor](https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published.
+
+Current status:
+
+* core API is done and breaking API changes are unlikely.
+* 100% of unit tests pass:
+ * short mode (`go test -short`) tests around 65765 conversions in 0.005s.
+ * normal mode (`go test`) tests all possible 4+ billion conversions in about 95s.
+* 100% code coverage with both short mode and normal mode.
+* tested on amd64 but it should work on all little-endian platforms supported by Go.
+
+Roadmap:
+
+* add functions for fast batch conversions leveraging SIMD when supported by hardware.
+* speed up unit test when verifying all possible 4+ billion conversions.
+* test on additional platforms.
+
+## Float16 to Float32 Conversion
+Conversions from float16 to float32 are lossless conversions. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct.
+
+Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions.
+
+## Float32 to Float16 Conversion
+Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct.
+
+Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32().
+
+Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage.
+
+## Usage
+Install with `go get github.com/x448/float16`.
+```
+// Convert float32 to float16
+pi := float32(math.Pi)
+pi16 := float16.Fromfloat32(pi)
+
+// Convert float16 to float32
+pi32 := pi16.Float32()
+
+// PrecisionFromfloat32() is faster than the overhead of calling a function.
+// This example only converts if there's no data loss and input is not a subnormal.
+if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact {
+ pi16 := float16.Fromfloat32(pi)
+}
+```
+
+## Float16 Type and API
+Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods.
+```
+package float16 // import "github.com/x448/float16"
+
+// Exported types and consts
+type Float16 uint16
+const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")
+
+// Exported functions
+Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding
+ with identical results to AMD and Intel F16C hardware. NaN inputs
+ are converted with quiet bit always set on, to be like F16C.
+
+FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit.
+ // The "ps" suffix means "preserve signaling".
+ // Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN.
+
+Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.)
+NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number
+Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign
+
+PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow
+ // (inline and < 1 ns/op)
+// Exported methods
+(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion
+(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f
+(f Float16) IsNaN() bool // true if f is not-a-number (NaN)
+(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN)
+(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf)
+(f Float16) IsFinite() bool // true if f is not infinite or NaN
+(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN.
+(f Float16) Signbit() bool // true if f is negative or negative zero
+(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface
+```
+See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info.
+
+## Benchmarks
+Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value.
+
+```
+All functions have zero allocations except float16.String().
+
+FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16
+ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32
+Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16
+
+PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc.
+```
+
+## System Requirements
+* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions.
+* Tested on amd64 but it should also work on all little-endian platforms supported by Go.
+
+## Special Thanks
+Special thanks to Kathryn Long (starkat99) for creating [half-rs](https://github.com/starkat99/half-rs), a very nice rust implementation of float16.
+
+## License
+Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
+
+Licensed under [MIT License](LICENSE)
diff --git a/vendor/github.com/x448/float16/float16.go b/vendor/github.com/x448/float16/float16.go
new file mode 100644
index 0000000000..1a0e6dad00
--- /dev/null
+++ b/vendor/github.com/x448/float16/float16.go
@@ -0,0 +1,302 @@
+// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
+//
+// Special thanks to Kathryn Long for her Rust implementation
+// of float16 at github.com/starkat99/half-rs (MIT license)
+
+package float16
+
+import (
+ "math"
+ "strconv"
+)
+
+// Float16 represents IEEE 754 half-precision floating-point numbers (binary16).
+type Float16 uint16
+
+// Precision indicates whether the conversion to Float16 is
+// exact, subnormal without dropped bits, inexact, underflow, or overflow.
+type Precision int
+
+const (
+
+ // PrecisionExact is for non-subnormals that don't drop bits during conversion.
+ // All of these can round-trip. Should always convert to float16.
+ PrecisionExact Precision = iota
+
+ // PrecisionUnknown is for subnormals that don't drop bits during conversion but
+ // not all of these can round-trip so precision is unknown without more effort.
+ // Only 2046 of these can round-trip and the rest cannot round-trip.
+ PrecisionUnknown
+
+ // PrecisionInexact is for dropped significand bits and cannot round-trip.
+ // Some of these are subnormals. Cannot round-trip float32->float16->float32.
+ PrecisionInexact
+
+ // PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32.
+ PrecisionUnderflow
+
+ // PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32.
+ PrecisionOverflow
+)
+
+// PrecisionFromfloat32 returns Precision without performing
+// the conversion. Conversions from both Infinity and NaN
+// values will always report PrecisionExact even if NaN payload
+// or NaN-Quiet-Bit is lost. This function is kept simple to
+// allow inlining and run < 0.5 ns/op, to serve as a fast filter.
+func PrecisionFromfloat32(f32 float32) Precision {
+ u32 := math.Float32bits(f32)
+
+ if u32 == 0 || u32 == 0x80000000 {
+ // +- zero will always be exact conversion
+ return PrecisionExact
+ }
+
+ const COEFMASK uint32 = 0x7fffff // 23 least significant bits
+ const EXPSHIFT uint32 = 23
+ const EXPBIAS uint32 = 127
+ const EXPMASK uint32 = uint32(0xff) << EXPSHIFT
+ const DROPMASK uint32 = COEFMASK >> 10
+
+ exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS)
+ coef := u32 & COEFMASK
+
+ if exp == 128 {
+ // +- infinity or NaN
+ // apps may want to do extra checks for NaN separately
+ return PrecisionExact
+ }
+
+ // https://en.wikipedia.org/wiki/Half-precision_floating-point_format says,
+ // "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24"
+ if exp < -24 {
+ return PrecisionUnderflow
+ }
+ if exp > 15 {
+ return PrecisionOverflow
+ }
+ if (coef & DROPMASK) != uint32(0) {
+ // these include subnormals and non-subnormals that dropped bits
+ return PrecisionInexact
+ }
+
+ if exp < -14 {
+ // Subnormals. Caller may want to test these further.
+ // There are 2046 subnormals that can successfully round-trip f32->f16->f32
+ // and 20 of those 2046 have 32-bit input coef == 0.
+ // RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value"
+ // so some protocols and libraries will choose to handle subnormals differently
+ // when deciding to encode them to CBOR float32 vs float16.
+ return PrecisionUnknown
+ }
+
+ return PrecisionExact
+}
+
+// Frombits returns the float16 number corresponding to the IEEE 754 binary16
+// representation u16, with the sign bit of u16 and the result in the same bit
+// position. Frombits(Bits(x)) == x.
+func Frombits(u16 uint16) Float16 {
+ return Float16(u16)
+}
+
+// Fromfloat32 returns a Float16 value converted from f32. Conversion uses
+// IEEE default rounding (nearest int, with ties to even).
+func Fromfloat32(f32 float32) Float16 {
+ return Float16(f32bitsToF16bits(math.Float32bits(f32)))
+}
+
+// ErrInvalidNaNValue indicates a NaN was not received.
+const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")
+
+type float16Error string
+
+func (e float16Error) Error() string { return string(e) }
+
+// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both
+// signaling and payload. Unlike Fromfloat32(), which can only return
+// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN.
+// If the result is infinity (sNaN with empty payload), then the
+// lowest bit of payload is set to make the result a NaN.
+// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN.
+// This function was kept simple to be able to inline.
+func FromNaN32ps(nan float32) (Float16, error) {
+ const SNAN = Float16(uint16(0x7c01)) // signalling NaN
+
+ u32 := math.Float32bits(nan)
+ sign := u32 & 0x80000000
+ exp := u32 & 0x7f800000
+ coef := u32 & 0x007fffff
+
+ if (exp != 0x7f800000) || (coef == 0) {
+ return SNAN, ErrInvalidNaNValue
+ }
+
+ u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13))
+
+ if (u16 & 0x03ff) == 0 {
+ // result became infinity, make it NaN by setting lowest bit in payload
+ u16 = u16 | 0x0001
+ }
+
+ return Float16(u16), nil
+}
+
+// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN).
+// Returned NaN value 0x7e01 has all exponent bits = 1 with the
+// first and last bits = 1 in the significand. This is consistent
+// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00.
+func NaN() Float16 {
+ return Float16(0x7e01)
+}
+
+// Inf returns a Float16 with an infinity value with the specified sign.
+// A sign >= returns positive infinity.
+// A sign < 0 returns negative infinity.
+func Inf(sign int) Float16 {
+ if sign >= 0 {
+ return Float16(0x7c00)
+ }
+ return Float16(0x8000 | 0x7c00)
+}
+
+// Float32 returns a float32 converted from f (Float16).
+// This is a lossless conversion.
+func (f Float16) Float32() float32 {
+ u32 := f16bitsToF32bits(uint16(f))
+ return math.Float32frombits(u32)
+}
+
+// Bits returns the IEEE 754 binary16 representation of f, with the sign bit
+// of f and the result in the same bit position. Bits(Frombits(x)) == x.
+func (f Float16) Bits() uint16 {
+ return uint16(f)
+}
+
+// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value.
+func (f Float16) IsNaN() bool {
+ return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0)
+}
+
+// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16
+// “not-a-number” value.
+func (f Float16) IsQuietNaN() bool {
+ return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0)
+}
+
+// IsInf reports whether f is an infinity (inf).
+// A sign > 0 reports whether f is positive inf.
+// A sign < 0 reports whether f is negative inf.
+// A sign == 0 reports whether f is either inf.
+func (f Float16) IsInf(sign int) bool {
+ return ((f == 0x7c00) && sign >= 0) ||
+ (f == 0xfc00 && sign <= 0)
+}
+
+// IsFinite returns true if f is neither infinite nor NaN.
+func (f Float16) IsFinite() bool {
+ return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00)
+}
+
+// IsNormal returns true if f is neither zero, infinite, subnormal, or NaN.
+func (f Float16) IsNormal() bool {
+ exp := uint16(f) & uint16(0x7c00)
+ return (exp != uint16(0x7c00)) && (exp != 0)
+}
+
+// Signbit reports whether f is negative or negative zero.
+func (f Float16) Signbit() bool {
+ return (uint16(f) & uint16(0x8000)) != 0
+}
+
+// String satisfies the fmt.Stringer interface.
+func (f Float16) String() string {
+ return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32)
+}
+
+// f16bitsToF32bits returns uint32 (float32 bits) converted from specified uint16.
+func f16bitsToF32bits(in uint16) uint32 {
+ // All 65536 conversions with this were confirmed to be correct
+ // by Montgomery Edwards⁴⁴⁸ (github.com/x448).
+
+ sign := uint32(in&0x8000) << 16 // sign for 32-bit
+ exp := uint32(in&0x7c00) >> 10 // exponenent for 16-bit
+ coef := uint32(in&0x03ff) << 13 // significand for 32-bit
+
+ if exp == 0x1f {
+ if coef == 0 {
+ // infinity
+ return sign | 0x7f800000 | coef
+ }
+ // NaN
+ return sign | 0x7fc00000 | coef
+ }
+
+ if exp == 0 {
+ if coef == 0 {
+ // zero
+ return sign
+ }
+
+ // normalize subnormal numbers
+ exp++
+ for coef&0x7f800000 == 0 {
+ coef <<= 1
+ exp--
+ }
+ coef &= 0x007fffff
+ }
+
+ return sign | ((exp + (0x7f - 0xf)) << 23) | coef
+}
+
+// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32.
+// Conversion rounds to nearest integer with ties to even.
+func f32bitsToF16bits(u32 uint32) uint16 {
+ // Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448).
+ // All 4294967296 conversions with this were confirmed to be correct by x448.
+ // Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license.
+
+ sign := u32 & 0x80000000
+ exp := u32 & 0x7f800000
+ coef := u32 & 0x007fffff
+
+ if exp == 0x7f800000 {
+ // NaN or Infinity
+ nanBit := uint32(0)
+ if coef != 0 {
+ nanBit = uint32(0x0200)
+ }
+ return uint16((sign >> 16) | uint32(0x7c00) | nanBit | (coef >> 13))
+ }
+
+ halfSign := sign >> 16
+
+ unbiasedExp := int32(exp>>23) - 127
+ halfExp := unbiasedExp + 15
+
+ if halfExp >= 0x1f {
+ return uint16(halfSign | uint32(0x7c00))
+ }
+
+ if halfExp <= 0 {
+ if 14-halfExp > 24 {
+ return uint16(halfSign)
+ }
+ coef := coef | uint32(0x00800000)
+ halfCoef := coef >> uint32(14-halfExp)
+ roundBit := uint32(1) << uint32(13-halfExp)
+ if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 {
+ halfCoef++
+ }
+ return uint16(halfSign | halfCoef)
+ }
+
+ uHalfExp := uint32(halfExp) << 10
+ halfCoef := coef >> 13
+ roundBit := uint32(0x00001000)
+ if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 {
+ return uint16((halfSign | uHalfExp | halfCoef) + 1)
+ }
+ return uint16(halfSign | uHalfExp | halfCoef)
+}
diff --git a/vendor/go.yaml.in/yaml/v2/.travis.yml b/vendor/go.yaml.in/yaml/v2/.travis.yml
new file mode 100644
index 0000000000..7348c50c0c
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go:
+ - "1.4.x"
+ - "1.5.x"
+ - "1.6.x"
+ - "1.7.x"
+ - "1.8.x"
+ - "1.9.x"
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - "1.13.x"
+ - "1.14.x"
+ - "tip"
+
+go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/vendor/go.yaml.in/yaml/v2/LICENSE
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE
rename to vendor/go.yaml.in/yaml/v2/LICENSE
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml
rename to vendor/go.yaml.in/yaml/v2/LICENSE.libyaml
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/vendor/go.yaml.in/yaml/v2/NOTICE
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE
rename to vendor/go.yaml.in/yaml/v2/NOTICE
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/vendor/go.yaml.in/yaml/v2/README.md
similarity index 76%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
rename to vendor/go.yaml.in/yaml/v2/README.md
index 53f4139dc3..c9388da425 100644
--- a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
+++ b/vendor/go.yaml.in/yaml/v2/README.md
@@ -1,13 +1,3 @@
-# go-yaml fork
-
-This package is a fork of the go-yaml library and is intended solely for consumption
-by kubernetes projects. In this fork, we plan to support only critical changes required for
-kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests
-should be made in the upstream go-yaml library, and we will reject such changes in this fork
-unless we are pulling them from upstream.
-
-This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0
-
# YAML support for the Go language
Introduction
@@ -30,18 +20,16 @@ supported since they're a poor design and are gone in YAML 1.2.
Installation and usage
----------------------
-The import path for the package is *gopkg.in/yaml.v2*.
+The import path for the package is *go.yaml.in/yaml/v2*.
To install it, run:
- go get gopkg.in/yaml.v2
+ go get go.yaml.in/yaml/v2
API documentation
-----------------
-If opened in a browser, the import path itself leads to the API documentation:
-
- * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+See:
API stability
-------------
@@ -65,7 +53,7 @@ import (
"fmt"
"log"
- "gopkg.in/yaml.v2"
+ "go.yaml.in/yaml/v2"
)
var data = `
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/vendor/go.yaml.in/yaml/v2/apic.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go
rename to vendor/go.yaml.in/yaml/v2/apic.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/vendor/go.yaml.in/yaml/v2/decode.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go
rename to vendor/go.yaml.in/yaml/v2/decode.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/vendor/go.yaml.in/yaml/v2/emitterc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go
rename to vendor/go.yaml.in/yaml/v2/emitterc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/vendor/go.yaml.in/yaml/v2/encode.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go
rename to vendor/go.yaml.in/yaml/v2/encode.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/vendor/go.yaml.in/yaml/v2/parserc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go
rename to vendor/go.yaml.in/yaml/v2/parserc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/vendor/go.yaml.in/yaml/v2/readerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go
rename to vendor/go.yaml.in/yaml/v2/readerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/vendor/go.yaml.in/yaml/v2/resolve.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go
rename to vendor/go.yaml.in/yaml/v2/resolve.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/vendor/go.yaml.in/yaml/v2/scannerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go
rename to vendor/go.yaml.in/yaml/v2/scannerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/vendor/go.yaml.in/yaml/v2/sorter.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go
rename to vendor/go.yaml.in/yaml/v2/sorter.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/vendor/go.yaml.in/yaml/v2/writerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go
rename to vendor/go.yaml.in/yaml/v2/writerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/vendor/go.yaml.in/yaml/v2/yaml.go
similarity index 99%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
rename to vendor/go.yaml.in/yaml/v2/yaml.go
index 30813884c0..5248e1263c 100644
--- a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
+++ b/vendor/go.yaml.in/yaml/v2/yaml.go
@@ -2,7 +2,7 @@
//
// Source code and other details for the project are available at GitHub:
//
-// https://github.com/go-yaml/yaml
+// https://github.com/yaml/go-yaml
//
package yaml
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/vendor/go.yaml.in/yaml/v2/yamlh.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go
rename to vendor/go.yaml.in/yaml/v2/yamlh.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/vendor/go.yaml.in/yaml/v2/yamlprivateh.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go
rename to vendor/go.yaml.in/yaml/v2/yamlprivateh.go
diff --git a/vendor/go.yaml.in/yaml/v3/LICENSE b/vendor/go.yaml.in/yaml/v3/LICENSE
new file mode 100644
index 0000000000..2683e4bb1f
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/LICENSE
@@ -0,0 +1,50 @@
+
+This project is covered by two different licenses: MIT and Apache.
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright staring in 2011 when the project was ported over:
+
+ apic.go emitterc.go parserc.go readerc.go scannerc.go
+ writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/go.yaml.in/yaml/v3/NOTICE b/vendor/go.yaml.in/yaml/v3/NOTICE
new file mode 100644
index 0000000000..866d74a7ad
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/go.yaml.in/yaml/v3/README.md b/vendor/go.yaml.in/yaml/v3/README.md
new file mode 100644
index 0000000000..15a85a6350
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/README.md
@@ -0,0 +1,171 @@
+go.yaml.in/yaml
+===============
+
+YAML Support for the Go Language
+
+
+## Introduction
+
+The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode
+and decode [YAML](https://yaml.org/) values.
+
+It was originally developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go
+port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to
+parse and generate YAML data quickly and reliably.
+
+
+## Project Status
+
+This project started as a fork of the extremely popular [go-yaml](
+https://github.com/go-yaml/yaml/)
+project, and is being maintained by the official [YAML organization](
+https://github.com/yaml/).
+
+The YAML team took over ongoing maintenance and development of the project after
+discussion with go-yaml's author, @niemeyer, following his decision to
+[label the project repository as "unmaintained"](
+https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025.
+
+We have put together a team of dedicated maintainers including representatives
+of go-yaml's most important downstream projects.
+
+We will strive to earn the trust of the various go-yaml forks to switch back to
+this repository as their upstream.
+
+Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you
+would like to contribute or be involved.
+
+
+## Compatibility
+
+The `yaml` package supports most of YAML 1.2, but preserves some behavior from
+1.1 for backwards compatibility.
+
+Specifically, v3 of the `yaml` package:
+
+* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being
+ decoded into a typed bool value.
+ Otherwise they behave as a string.
+ Booleans in YAML 1.2 are `true`/`false` only.
+* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than
+ `0o777` as specified in YAML 1.2, because most parsers still use the old
+ format.
+ Octals in the `0o777` format are supported though, so new files work.
+* Does not support base-60 floats.
+ These are gone from YAML 1.2, and were actually never supported by this
+ package as it's clearly a poor choice.
+
+
+## Installation and Usage
+
+The import path for the package is *go.yaml.in/yaml/v3*.
+
+To install it, run:
+
+```bash
+go get go.yaml.in/yaml/v3
+```
+
+
+## API Documentation
+
+See:
+
+
+## API Stability
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](
+https://gopkg.in).
+
+
+## Example
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "go.yaml.in/yaml/v3"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
+
+## License
+
+The yaml package is licensed under the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
diff --git a/vendor/go.yaml.in/yaml/v3/apic.go b/vendor/go.yaml.in/yaml/v3/apic.go
new file mode 100644
index 0000000000..05fd305da1
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/apic.go
@@ -0,0 +1,747 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ best_width: -1,
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+// Create ALIAS.
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ anchor: anchor,
+ }
+ return true
+}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/go.yaml.in/yaml/v3/decode.go b/vendor/go.yaml.in/yaml/v3/decode.go
new file mode 100644
index 0000000000..02e2b17bfe
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/decode.go
@@ -0,0 +1,1018 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *Node
+ anchors map[string]*Node
+ doneInit bool
+ textless bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML emitter")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML emitter")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.anchors = make(map[string]*Node)
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+	// It's a curious choice from the underlying API to generally return a
+	// positive result on success, but in this case return true in an error
+	// scenario. This was the source of bugs in the past (issue #666).
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ // Scanner errors don't iterate line before returning error
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ // Scanner errors don't iterate line before returning error
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+ if anchor != nil {
+ n.Anchor = string(anchor)
+ p.anchors[n.Anchor] = n
+ }
+}
+
+func (p *parser) parse() *Node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ case yaml_TAIL_COMMENT_EVENT:
+ panic("internal error: unexpected tail comment event (please report)")
+ default:
+ panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
+ var style Style
+ if tag != "" && tag != "!" {
+ tag = shortTag(tag)
+ style = TaggedStyle
+ } else if defaultTag != "" {
+ tag = defaultTag
+ } else if kind == ScalarNode {
+ tag, _ = resolve("", value)
+ }
+ n := &Node{
+ Kind: kind,
+ Tag: tag,
+ Value: value,
+ Style: style,
+ }
+ if !p.textless {
+ n.Line = p.event.start_mark.line + 1
+ n.Column = p.event.start_mark.column + 1
+ n.HeadComment = string(p.event.head_comment)
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ }
+ return n
+}
+
+func (p *parser) parseChild(parent *Node) *Node {
+ child := p.parse()
+ parent.Content = append(parent.Content, child)
+ return child
+}
+
+func (p *parser) document() *Node {
+ n := p.node(DocumentNode, "", "", "")
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ p.parseChild(n)
+ if p.peek() == yaml_DOCUMENT_END_EVENT {
+ n.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *Node {
+ n := p.node(AliasNode, "", "", string(p.event.anchor))
+ n.Alias = p.anchors[n.Value]
+ if n.Alias == nil {
+ failf("unknown anchor '%s' referenced", n.Value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *Node {
+ var parsedStyle = p.event.scalar_style()
+ var nodeStyle Style
+ switch {
+ case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = DoubleQuotedStyle
+ case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = SingleQuotedStyle
+ case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
+ nodeStyle = LiteralStyle
+ case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
+ nodeStyle = FoldedStyle
+ }
+ var nodeValue = string(p.event.value)
+ var nodeTag = string(p.event.tag)
+ var defaultTag string
+ if nodeStyle == 0 {
+ if nodeValue == "<<" {
+ defaultTag = mergeTag
+ }
+ } else {
+ defaultTag = strTag
+ }
+ n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
+ n.Style |= nodeStyle
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *Node {
+ n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
+ if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ p.parseChild(n)
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *Node {
+ n := p.node(MappingNode, mapTag, string(p.event.tag), "")
+ block := true
+ if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
+ block = false
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ k := p.parseChild(n)
+ if block && k.FootComment != "" {
+ // Must be a foot comment for the prior value when being dedented.
+ if len(n.Content) > 2 {
+ n.Content[len(n.Content)-3].FootComment = k.FootComment
+ k.FootComment = ""
+ }
+ }
+ v := p.parseChild(n)
+ if k.FootComment == "" && v.FootComment != "" {
+ k.FootComment = v.FootComment
+ v.FootComment = ""
+ }
+ if p.peek() == yaml_TAIL_COMMENT_EVENT {
+ if k.FootComment == "" {
+ k.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_TAIL_COMMENT_EVENT)
+ }
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
+ n.Content[len(n.Content)-2].FootComment = n.FootComment
+ n.FootComment = ""
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *Node
+ aliases map[*Node]bool
+ terrors []string
+
+ stringMapType reflect.Type
+ generalMapType reflect.Type
+
+ knownFields bool
+ uniqueKeys bool
+ decodeCount int
+ aliasCount int
+ aliasDepth int
+
+ mergedFields map[interface{}]bool
+}
+
+var (
+ nodeType = reflect.TypeOf(Node{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ stringMapType = reflect.TypeOf(map[string]interface{}{})
+ generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = generalMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder() *decoder {
+ d := &decoder{
+ stringMapType: stringMapType,
+ generalMapType: generalMapType,
+ uniqueKeys: true,
+ }
+ d.aliases = make(map[*Node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
+ if n.Tag != "" {
+ tag = n.Tag
+ }
+ value := n.Value
+ if tag != seqTag && tag != mapTag {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
+ err := u.UnmarshalYAML(n)
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.ShortTag() == nullTag {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ outi := out.Addr().Interface()
+ if u, ok := outi.(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ if u, ok := outi.(obsoleteUnmarshaler); ok {
+ good = d.callObsoleteUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
+ if n.ShortTag() == nullTag {
+ return reflect.Value{}
+ }
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or
+ // ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
+ // ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+}
+
+func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
+ if out.Type() == nodeType {
+ out.Set(reflect.ValueOf(n).Elem())
+ return true
+ }
+ switch n.Kind {
+ case DocumentNode:
+ return d.document(n, out)
+ case AliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.Kind {
+ case ScalarNode:
+ good = d.scalar(n, out)
+ case MappingNode:
+ good = d.mapping(n, out)
+ case SequenceNode:
+ good = d.sequence(n, out)
+ case 0:
+ if n.IsZero() {
+ return d.null(out)
+ }
+ fallthrough
+ default:
+ failf("cannot decode node with unknown kind %d", n.Kind)
+ }
+ return good
+}
+
+func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
+ if len(n.Content) == 1 {
+ d.doc = n
+ d.unmarshal(n.Content[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.Value)
+ }
+ d.aliases[n] = true
+ d.aliasDepth++
+ good = d.unmarshal(n.Alias, out)
+ d.aliasDepth--
+ delete(d.aliases, n)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) null(out reflect.Value) bool {
+ if out.CanAddr() {
+ switch out.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ out.Set(reflect.Zero(out.Type()))
+ return true
+ }
+ }
+ return false
+}
+
+func (d *decoder) scalar(n *Node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.indicatedString() {
+ tag = strTag
+ resolved = n.Value
+ } else {
+ tag, resolved = resolve(n.Tag, n.Value)
+ if tag == binaryTag {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ return d.null(out)
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == binaryTag {
+ text = []byte(resolved.(string))
+ } else {
+ // We let any value be unmarshaled into TextUnmarshaler.
+ // That might be more lax than we'd like, but the
+ // TextUnmarshaler itself should bowl out any dubious values.
+ text = []byte(n.Value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == binaryTag {
+ out.SetString(resolved.(string))
+ return true
+ }
+ out.SetString(n.Value)
+ return true
+ case reflect.Interface:
+ out.Set(reflect.ValueOf(resolved))
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ // This used to work in v2, but it's very unfriendly.
+ isDuration := out.Type() == durationType
+
+ switch resolved := resolved.(type) {
+ case int:
+ if !isDuration && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !isDuration && !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ case string:
+ // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
+ // It only works if explicitly attempting to unmarshal into a typed bool value.
+ switch resolved {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
+ out.SetBool(true)
+ return true
+ case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ out.SetBool(false)
+ return true
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ panic("yaml internal error: please report the issue")
+ }
+ d.terror(n, tag, out)
+ return false
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, seqTag, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.Content[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+ if d.uniqueKeys {
+ nerrs := len(d.terrors)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ for j := i + 2; j < l; j += 2 {
+ nj := n.Content[j]
+ if ni.Kind == nj.Kind && ni.Value == nj.Value {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
+ }
+ }
+ }
+ if len(d.terrors) > nerrs {
+ return false
+ }
+ }
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ iface := out
+ if isStringMap(n) {
+ out = reflect.MakeMap(d.stringMapType)
+ } else {
+ out = reflect.MakeMap(d.generalMapType)
+ }
+ iface.Set(out)
+ default:
+ d.terror(n, mapTag, out)
+ return false
+ }
+
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ stringMapType := d.stringMapType
+ generalMapType := d.generalMapType
+ if outt.Elem() == ifaceType {
+ if outt.Key().Kind() == reflect.String {
+ d.stringMapType = outt
+ } else if outt.Key() == ifaceType {
+ d.generalMapType = outt
+ }
+ }
+
+ mergedFields := d.mergedFields
+ d.mergedFields = nil
+
+ var mergeNode *Node
+
+ mapIsNew := false
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ mapIsNew = true
+ }
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.Content[i]) {
+ mergeNode = n.Content[i+1]
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.Content[i], k) {
+ if mergedFields != nil {
+ ki := k.Interface()
+ if d.getPossiblyUnhashableKey(mergedFields, ki) {
+ continue
+ }
+ d.setPossiblyUnhashableKey(mergedFields, ki, true)
+ }
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+
+ d.mergedFields = mergedFields
+ if mergeNode != nil {
+ d.merge(n, mergeNode, out)
+ }
+
+ d.stringMapType = stringMapType
+ d.generalMapType = generalMapType
+ return true
+}
+
+func isStringMap(n *Node) bool {
+ if n.Kind != MappingNode {
+ return false
+ }
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ shortTag := n.Content[i].ShortTag()
+ if shortTag != strTag && shortTag != mergeTag {
+ return false
+ }
+ }
+ return true
+}
+
+func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ elemType = inlineMap.Type().Elem()
+ }
+
+ for _, index := range sinfo.InlineUnmarshalers {
+ field := d.fieldByIndex(n, out, index)
+ d.prepare(n, field)
+ }
+
+ mergedFields := d.mergedFields
+ d.mergedFields = nil
+ var mergeNode *Node
+ var doneFields []bool
+ if d.uniqueKeys {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ name := settableValueOf("")
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ if isMerge(ni) {
+ mergeNode = n.Content[i+1]
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ sname := name.String()
+ if mergedFields != nil {
+ if mergedFields[sname] {
+ continue
+ }
+ mergedFields[sname] = true
+ }
+ if info, ok := sinfo.FieldsMap[sname]; ok {
+ if d.uniqueKeys {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = d.fieldByIndex(n, out, info.Inline)
+ }
+ d.unmarshal(n.Content[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.Content[i+1], value)
+ inlineMap.SetMapIndex(name, value)
+ } else if d.knownFields {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
+ }
+ }
+
+ d.mergedFields = mergedFields
+ if mergeNode != nil {
+ d.merge(n, mergeNode, out)
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) {
+ defer func() {
+ if err := recover(); err != nil {
+ failf("%v", err)
+ }
+ }()
+ m[key] = value
+}
+
+func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool {
+ defer func() {
+ if err := recover(); err != nil {
+ failf("%v", err)
+ }
+ }()
+ return m[key]
+}
+
+func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) {
+ mergedFields := d.mergedFields
+ if mergedFields == nil {
+ d.mergedFields = make(map[interface{}]bool)
+ for i := 0; i < len(parent.Content); i += 2 {
+ k := reflect.New(ifaceType).Elem()
+ if d.unmarshal(parent.Content[i], k) {
+ d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true)
+ }
+ }
+ }
+
+ switch merge.Kind {
+ case MappingNode:
+ d.unmarshal(merge, out)
+ case AliasNode:
+ if merge.Alias != nil && merge.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(merge, out)
+ case SequenceNode:
+ for i := 0; i < len(merge.Content); i++ {
+ ni := merge.Content[i]
+ if ni.Kind == AliasNode {
+ if ni.Alias != nil && ni.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ } else if ni.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+
+ d.mergedFields = mergedFields
+}
+
+func isMerge(n *Node) bool {
+ return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
+}
diff --git a/vendor/go.yaml.in/yaml/v3/emitterc.go b/vendor/go.yaml.in/yaml/v3/emitterc.go
new file mode 100644
index 0000000000..ab4e03ba72
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/emitterc.go
@@ -0,0 +1,2054 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and below and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and above and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ break
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ break
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ break
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ // [Go] This was changed so that indentations are more regular.
+ if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
+ // The first indent inside a sequence will just skip the "- " indicator.
+ emitter.indent += 2
+ } else {
+ // Everything else aligns to the chosen indentation.
+ emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent)
+ if compact_seq {
+ // The value compact_seq passed in is almost always set to `false` when this function is called,
+ // except when we are dealing with sequence nodes. So this gets triggered to subtract 2 only when we
+ // are increasing the indent to account for sequence nodes, which will be correct because we need to
+ // subtract 2 to account for the - at the beginning of the sequence node.
+ emitter.indent = emitter.indent - 2
+ }
+ }
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+ emitter.space_above = true
+ emitter.foot_indent = -1
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical || true {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if len(emitter.head_comment) > 0 {
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// yaml_emitter_increase_indent preserves the original signature and delegates to
+// yaml_emitter_increase_indent_compact without compact-sequence indentation
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false)
+}
+
+// yaml_emitter_process_line_comment preserves the original signature and delegates to
+// yaml_emitter_process_line_comment_linebreak passing false for linebreak
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+ return yaml_emitter_process_line_comment_linebreak(emitter, false)
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_emit_node(emitter, event, true, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ // [Go] Force document foot separation.
+ emitter.foot_indent = 0
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.foot_indent = -1
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node. `first` marks the first item after '[';
+// `trail` marks re-entry after a trailing comma was already written for
+// a comment-carrying item, so no separator must be emitted again.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		if emitter.canonical && !first && !trail {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+		}
+		emitter.flow_level--
+		// Pop the indentation level pushed when the sequence was opened.
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.column == 0 || emitter.canonical && !first {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+			return false
+		}
+		if !yaml_emitter_process_line_comment(emitter) {
+			return false
+		}
+		if !yaml_emitter_process_foot_comment(emitter) {
+			return false
+		}
+		// Return to the parent state.
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+
+		return true
+	}
+
+	if !first && !trail {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if emitter.column == 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	// If the item carries comments, the comma must be written before the
+	// comment text; switch to the TRAIL state so the next call skips it.
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
+	} else {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+	}
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a flow key node. `first` marks the first key after '{';
+// `trail` marks re-entry after a trailing comma was already written for
+// a comment-carrying entry, so no separator must be emitted again.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_MAPPING_END_EVENT {
+		if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+		}
+		if !yaml_emitter_process_head_comment(emitter) {
+			return false
+		}
+		emitter.flow_level--
+		// Pop the indentation level pushed when the mapping was opened.
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+			return false
+		}
+		if !yaml_emitter_process_line_comment(emitter) {
+			return false
+		}
+		if !yaml_emitter_process_foot_comment(emitter) {
+			return false
+		}
+		// Return to the parent state.
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+
+	if !first && !trail {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+
+	if emitter.column == 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	// Short keys are written inline ("key: value"); otherwise fall back
+	// to the explicit "? key" form required by canonical or long keys.
+	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node. `simple` selects the inline "key: value"
+// form; otherwise the value follows an explicit "?" key and the ':'
+// needs a separating space.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if emitter.canonical || emitter.column > emitter.best_width {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+			return false
+		}
+	}
+	// If the value carries comments, the comma must be written before
+	// the comment text; the TRAIL state tells the next key to skip it.
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
+	} else {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+	}
+	if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+		return false
+	}
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a block item node. Writes "- item" entries, one per line.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		// emitter.mapping_context tells us if we are currently in a mapping context.
+		// emitter.column tells us which column we are at in the yaml output; 0 is the first column.
+		// emitter.indention tells us if the last character written was an indentation character.
+		// emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements.
+		// So, `seq` means that we are in a mapping context, and we are either at the first char of the column or
+		// the last character was not an indentation character, and we consider '- ' part of the indentation
+		// for sequence elements.
+		seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
+			emitter.compact_sequence_indent
+		if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		// Pop indentation and return to the parent state.
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a block key node. Simple keys are written inline; complex keys
+// use the explicit "?" indicator.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		// Pop indentation and return to the parent state.
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if len(emitter.line_comment) > 0 {
+		// [Go] A line comment was provided for the key. That's unusual as the
+		//      scanner associates line comments with the value. Either way,
+		//      save the line comment and render it appropriately later.
+		emitter.key_line_comment = emitter.line_comment
+		emitter.line_comment = nil
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node. `simple` means the key was written inline,
+// so ':' follows immediately; otherwise it starts a new indented line.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+			return false
+		}
+	}
+	if len(emitter.key_line_comment) > 0 {
+		// [Go] Line comments are generally associated with the value, but when there's
+		//      no value on the same line as a mapping key they end up attached to the
+		//      key itself.
+		if event.typ == yaml_SCALAR_EVENT {
+			if len(emitter.line_comment) == 0 {
+				// A scalar is coming and it has no line comments by itself yet,
+				// so just let it handle the line comment as usual. If it has a
+				// line comment, we can't have both so the one from the key is lost.
+				emitter.line_comment = emitter.key_line_comment
+				emitter.key_line_comment = nil
+			}
+		} else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) {
+			// An indented block follows, so write the comment right now.
+			// Swap in the key's comment, render it, then swap back.
+			emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+			if !yaml_emitter_process_line_comment(emitter) {
+				return false
+			}
+			emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+	if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// yaml_emitter_silent_nil_event reports whether the event is an
+// implicitly-tagged empty scalar that can be omitted from the output
+// entirely (never the case in canonical mode).
+func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_SCALAR_EVENT || emitter.canonical {
+		return false
+	}
+	return event.implicit && len(emitter.scalar_data.value) == 0
+}
+
+// Expect a node. Records the current context flags on the emitter and
+// dispatches on the event type; any non-node event is an error.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+	root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+	emitter.root_context = root
+	emitter.sequence_context = sequence
+	emitter.mapping_context = mapping
+	emitter.simple_key_context = simple_key
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		return yaml_emitter_emit_alias(emitter, event)
+	case yaml_SCALAR_EVENT:
+		return yaml_emitter_emit_scalar(emitter, event)
+	case yaml_SEQUENCE_START_EVENT:
+		return yaml_emitter_emit_sequence_start(emitter, event)
+	case yaml_MAPPING_START_EVENT:
+		return yaml_emitter_emit_mapping_start(emitter, event)
+	default:
+		return yaml_emitter_set_emitter_error(emitter,
+			fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+	}
+}
+
+// Expect ALIAS. An alias is just "*anchor": write it via the anchor
+// machinery and pop back to the parent state.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	top := len(emitter.states) - 1
+	emitter.state = emitter.states[top]
+	emitter.states = emitter.states[:top]
+	return true
+}
+
+// Expect SCALAR. Picks a style, writes anchor/tag/value, then pops the
+// temporary indentation level and returns to the parent state.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_select_scalar_style(emitter, event) {
+		return false
+	}
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	// Push a flow-style indent just for the scalar body; popped below.
+	if !yaml_emitter_increase_indent(emitter, true, false) {
+		return false
+	}
+	if !yaml_emitter_process_scalar(emitter) {
+		return false
+	}
+	emitter.indent = emitter.indents[len(emitter.indents)-1]
+	emitter.indents = emitter.indents[:len(emitter.indents)-1]
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SEQUENCE-START. Writes anchor/tag, then chooses flow or block
+// style for the upcoming items.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	// Flow style applies when already inside a flow collection, in
+	// canonical mode, when explicitly requested, or for an empty sequence.
+	flow := emitter.flow_level > 0 ||
+		emitter.canonical ||
+		event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+		yaml_emitter_check_empty_sequence(emitter)
+	if flow {
+		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+	}
+	return true
+}
+
+// Expect MAPPING-START. Writes anchor/tag, then chooses flow or block
+// style for the upcoming keys.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	// Flow style applies when already inside a flow collection, in
+	// canonical mode, when explicitly requested, or for an empty mapping.
+	flow := emitter.flow_level > 0 ||
+		emitter.canonical ||
+		event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+		yaml_emitter_check_empty_mapping(emitter)
+	if flow {
+		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+	}
+	return true
+}
+
+// Check if the document content is an empty scalar.
+// [Go] Kept for structural parity with libyaml; this port never treats
+//      a document as empty, so the check is a constant.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+	return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence, i.e. a
+// SEQUENCE-START immediately followed by its SEQUENCE-END.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+	head := emitter.events_head
+	// At least two queued events are needed to look ahead.
+	if len(emitter.events) < head+2 {
+		return false
+	}
+	return emitter.events[head].typ == yaml_SEQUENCE_START_EVENT &&
+		emitter.events[head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping, i.e. a
+// MAPPING-START immediately followed by its MAPPING-END.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+	head := emitter.events_head
+	// At least two queued events are needed to look ahead.
+	if len(emitter.events) < head+2 {
+		return false
+	}
+	return emitter.events[head].typ == yaml_MAPPING_START_EVENT &&
+		emitter.events[head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple (inline) key:
+// single-line content whose rendered length fits within 128 characters.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+	length := 0
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_ALIAS_EVENT:
+		length += len(emitter.anchor_data.anchor)
+	case yaml_SCALAR_EVENT:
+		if emitter.scalar_data.multiline {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix) +
+			len(emitter.scalar_data.value)
+	case yaml_SEQUENCE_START_EVENT:
+		// Only an empty sequence ("[]") may serve as a simple key.
+		if !yaml_emitter_check_empty_sequence(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	case yaml_MAPPING_START_EVENT:
+		// Only an empty mapping ("{}") may serve as a simple key.
+		if !yaml_emitter_check_empty_mapping(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	default:
+		return false
+	}
+	// 128 is the simple-key length limit inherited from libyaml.
+	return length <= 128
+}
+
+// Determine an acceptable scalar style. Starts from the event's
+// requested style and progressively downgrades it (plain -> single
+// quoted -> double quoted) until the analyzed scalar data permits it;
+// the order of the checks below encodes that precedence.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+	if no_tag && !event.implicit && !event.quoted_implicit {
+		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+	}
+
+	style := event.scalar_style()
+	if style == yaml_ANY_SCALAR_STYLE {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	if emitter.canonical {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	// A multiline value cannot stay unquoted inside a simple key.
+	if emitter.simple_key_context && emitter.scalar_data.multiline {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	if style == yaml_PLAIN_SCALAR_STYLE {
+		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		// An empty plain scalar is ambiguous in flow/simple-key context.
+		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if no_tag && !event.implicit {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		// Block styles are illegal in flow context and in simple keys.
+		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	// A quoted untagged scalar needs the non-specific "!" tag so the
+	// parser will not resolve it as a plain implicit type.
+	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte{'!'}
+	}
+	emitter.scalar_data.style = style
+	return true
+}
+
+// Write an anchor, if one was analyzed for the current event.
+// "&" introduces an anchor definition, "*" an alias reference.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+	indicator := []byte{'&'}
+	if emitter.anchor_data.alias {
+		indicator = []byte{'*'}
+	}
+	if !yaml_emitter_write_indicator(emitter, indicator, true, false, false) {
+		return false
+	}
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag. Uses the shorthand "handle+suffix" form when a handle is
+// present, and the verbatim "!<...>" form otherwise. No-op when the
+// current event carries no tag data.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+		return true
+	}
+	if len(emitter.tag_data.handle) > 0 {
+		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+			return false
+		}
+		if len(emitter.tag_data.suffix) > 0 {
+			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+				return false
+			}
+		}
+	} else {
+		// [Go] Allocate these slices elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar using the style previously chosen by
+// yaml_emitter_select_scalar_style. The panic is unreachable unless a
+// new style constant is added without extending this switch.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Write a head comment. A pending tail comment (left over from the
+// previous node) is flushed first, then the head comment itself; both
+// buffers are cleared once written.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+			return false
+		}
+		emitter.tail_comment = emitter.tail_comment[:0]
+		// Remember the indent so write_indent can add a blank separator line.
+		emitter.foot_indent = emitter.indent
+		if emitter.foot_indent < 0 {
+			emitter.foot_indent = 0
+		}
+	}
+
+	if len(emitter.head_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+		return false
+	}
+	emitter.head_comment = emitter.head_comment[:0]
+	return true
+}
+
+// Write a line comment. When `linebreak` is set and there is no comment
+// to write, a bare newline is emitted instead.
+func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool {
+	if len(emitter.line_comment) == 0 {
+		// The next 3 lines are needed to resolve an issue with leading newlines
+		// See https://github.com/go-yaml/yaml/issues/755
+		// When linebreak is set to true, put_break will be called and will add
+		// the needed newline.
+		if linebreak && !put_break(emitter) {
+			return false
+		}
+		return true
+	}
+	// Separate the comment from preceding content with a space.
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+		return false
+	}
+	emitter.line_comment = emitter.line_comment[:0]
+	return true
+}
+
+// Write a foot comment, if one is pending, and record its indent in
+// foot_indent so the next write_indent inserts a separating blank line.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.foot_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+		return false
+	}
+	emitter.foot_comment = emitter.foot_comment[:0]
+	emitter.foot_indent = emitter.indent
+	if emitter.foot_indent < 0 {
+		emitter.foot_indent = 0
+	}
+	return true
+}
+
+// Check if a %YAML directive is valid: only version 1.1 is supported
+// by this emitter.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major == 1 && version_directive.minor == 1 {
+		return true
+	}
+	return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+}
+
+// Check if a %TAG directive is valid: the handle must be a non-empty
+// "!...!" token of alphanumeric characters and the prefix non-empty.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+	handle := tag_directive.handle
+	prefix := tag_directive.prefix
+	if len(handle) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+	}
+	if handle[0] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+	}
+	if handle[len(handle)-1] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+	}
+	// Step by UTF-8 rune width over the interior between the two '!'.
+	for i := 1; i < len(handle)-1; i += width(handle[i]) {
+		if !is_alpha(handle, i) {
+			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+		}
+	}
+	if len(prefix) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+	}
+	return true
+}
+
+// Check if an anchor is valid (non-empty, alphanumeric only) and stash
+// it in anchor_data for the upcoming write. `alias` selects the error
+// wording and is recorded so '&' vs '*' can be chosen later.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+	if len(anchor) == 0 {
+		problem := "anchor value must not be empty"
+		if alias {
+			problem = "alias value must not be empty"
+		}
+		return yaml_emitter_set_emitter_error(emitter, problem)
+	}
+	// Step by UTF-8 rune width.
+	for i := 0; i < len(anchor); i += width(anchor[i]) {
+		if !is_alpha(anchor, i) {
+			problem := "anchor value must contain alphanumerical characters only"
+			if alias {
+				problem = "alias value must contain alphanumerical characters only"
+			}
+			return yaml_emitter_set_emitter_error(emitter, problem)
+		}
+	}
+	emitter.anchor_data.anchor = anchor
+	emitter.anchor_data.alias = alias
+	return true
+}
+
+// Check if a tag is valid and resolve it against the active %TAG
+// directives: the first directive whose prefix matches splits the tag
+// into handle+suffix; otherwise the whole tag becomes the suffix.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+	if len(tag) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+	}
+	for i := range emitter.tag_directives {
+		tag_directive := &emitter.tag_directives[i]
+		if bytes.HasPrefix(tag, tag_directive.prefix) {
+			emitter.tag_data.handle = tag_directive.handle
+			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+			return true
+		}
+	}
+	emitter.tag_data.suffix = tag
+	return true
+}
+
+// Check if a scalar is valid. Performs a single pass over the value,
+// classifying characters (indicators, breaks, spaces, specials), and
+// from that derives which scalar styles (plain/single-quoted/block) are
+// permitted, storing the result in emitter.scalar_data.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	var (
+		block_indicators   = false
+		flow_indicators    = false
+		line_breaks        = false
+		special_characters = false
+		tab_characters     = false
+
+		leading_space  = false
+		leading_break  = false
+		trailing_space = false
+		trailing_break = false
+		break_space    = false
+		space_break    = false
+
+		preceded_by_whitespace = false
+		followed_by_whitespace = false
+		previous_space         = false
+		previous_break         = false
+	)
+
+	emitter.scalar_data.value = value
+
+	if len(value) == 0 {
+		// Empty scalars may be plain in block context only.
+		emitter.scalar_data.multiline = false
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = true
+		emitter.scalar_data.single_quoted_allowed = true
+		emitter.scalar_data.block_allowed = false
+		return true
+	}
+
+	// "---" and "..." would be mistaken for document markers.
+	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+		block_indicators = true
+		flow_indicators = true
+	}
+
+	preceded_by_whitespace = true
+	for i, w := 0, 0; i < len(value); i += w {
+		w = width(value[i])
+		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+		if i == 0 {
+			// Characters that are indicators only at the start of a scalar.
+			switch value[i] {
+			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+				flow_indicators = true
+				block_indicators = true
+			case '?', ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '-':
+				if followed_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		} else {
+			// Characters that are indicators anywhere inside a scalar.
+			switch value[i] {
+			case ',', '?', '[', ']', '{', '}':
+				flow_indicators = true
+			case ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '#':
+				if preceded_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		}
+
+		if value[i] == '\t' {
+			tab_characters = true
+		} else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+			special_characters = true
+		}
+		if is_space(value, i) {
+			if i == 0 {
+				leading_space = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_space = true
+			}
+			if previous_break {
+				break_space = true
+			}
+			previous_space = true
+			previous_break = false
+		} else if is_break(value, i) {
+			line_breaks = true
+			if i == 0 {
+				leading_break = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_break = true
+			}
+			if previous_space {
+				space_break = true
+			}
+			previous_space = false
+			previous_break = true
+		} else {
+			previous_space = false
+			previous_break = false
+		}
+
+		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+		preceded_by_whitespace = is_blankz(value, i)
+	}
+
+	// Start permissive and restrict styles based on what was found.
+	emitter.scalar_data.multiline = line_breaks
+	emitter.scalar_data.flow_plain_allowed = true
+	emitter.scalar_data.block_plain_allowed = true
+	emitter.scalar_data.single_quoted_allowed = true
+	emitter.scalar_data.block_allowed = true
+
+	if leading_space || leading_break || trailing_space || trailing_break {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if trailing_space {
+		emitter.scalar_data.block_allowed = false
+	}
+	if break_space {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+	}
+	if space_break || tab_characters || special_characters {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+	}
+	if space_break || special_characters {
+		emitter.scalar_data.block_allowed = false
+	}
+	if line_breaks {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if flow_indicators {
+		emitter.scalar_data.flow_plain_allowed = false
+	}
+	if block_indicators {
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	return true
+}
+
+// Check if the event data is valid. Resets the per-event analysis
+// buffers, captures the event's comments, and runs the anchor/tag/
+// scalar analyzers appropriate for the event type.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	emitter.anchor_data.anchor = nil
+	emitter.tag_data.handle = nil
+	emitter.tag_data.suffix = nil
+	emitter.scalar_data.value = nil
+
+	if len(event.head_comment) > 0 {
+		emitter.head_comment = event.head_comment
+	}
+	if len(event.line_comment) > 0 {
+		emitter.line_comment = event.line_comment
+	}
+	if len(event.foot_comment) > 0 {
+		emitter.foot_comment = event.foot_comment
+	}
+	if len(event.tail_comment) > 0 {
+		emitter.tail_comment = event.tail_comment
+	}
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+			return false
+		}
+
+	case yaml_SCALAR_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		// The tag is only needed when it cannot be inferred by the parser.
+		if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+		if !yaml_emitter_analyze_scalar(emitter, event.value) {
+			return false
+		}
+
+	case yaml_SEQUENCE_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+
+	case yaml_MAPPING_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Write the BOM character. The buffer is flushed first so the three
+// bytes are guaranteed to fit.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+	if !flush(emitter) {
+		return false
+	}
+	// UTF-8 byte order mark: EF BB BF.
+	pos := emitter.buffer_pos
+	emitter.buffer[pos] = '\xEF'
+	emitter.buffer[pos+1] = '\xBB'
+	emitter.buffer[pos+2] = '\xBF'
+	emitter.buffer_pos = pos + 3
+	return true
+}
+
+// yaml_emitter_write_indent moves the cursor to the start of the next
+// line (when needed) and pads with spaces up to the current indent.
+// When foot_indent matches, an extra blank line separates a previously
+// written foot comment from what follows.
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+	indent := emitter.indent
+	if indent < 0 {
+		indent = 0
+	}
+	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+		if !put_break(emitter) {
+			return false
+		}
+	}
+	if emitter.foot_indent == indent {
+		// Second break: blank line after a foot comment at this indent.
+		if !put_break(emitter) {
+			return false
+		}
+	}
+	for emitter.column < indent {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	emitter.whitespace = true
+	//emitter.indention = true
+	emitter.space_above = false
+	emitter.foot_indent = -1
+	return true
+}
+
+// yaml_emitter_write_indicator writes a syntax indicator (e.g. "-",
+// ":", "[", "..."). need_whitespace forces a separating space before
+// it; is_whitespace/is_indention describe how the indicator affects the
+// whitespace and indentation state after it is written.
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, indicator) {
+		return false
+	}
+	emitter.whitespace = is_whitespace
+	// Indentation survives only if the indicator itself counts as indentation.
+	emitter.indention = (emitter.indention && is_indention)
+	emitter.open_ended = false
+	return true
+}
+
+// yaml_emitter_write_anchor writes an anchor or alias name verbatim
+// (names are pre-validated as alphanumeric, so no escaping is needed).
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+// yaml_emitter_write_tag_handle writes a tag handle (e.g. "!!"),
+// inserting a separating space if the previous character was not
+// whitespace.
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+// yaml_emitter_write_tag_content writes a tag suffix, percent-encoding
+// (URI-style, "%XX" per byte) every character that is not alphanumeric
+// or in the allowed URI punctuation set.
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	for i := 0; i < len(value); {
+		var must_write bool
+		switch value[i] {
+		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+			must_write = true
+		default:
+			must_write = is_alpha(value, i)
+		}
+		if must_write {
+			if !write(emitter, value, &i) {
+				return false
+			}
+		} else {
+			// Percent-encode each byte of the (possibly multi-byte) rune.
+			w := width(value[i])
+			for k := 0; k < w; k++ {
+				octet := value[i]
+				i++
+				if !put(emitter, '%') {
+					return false
+				}
+
+				// High nibble as an uppercase hex digit.
+				c := octet >> 4
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+
+				// Low nibble as an uppercase hex digit.
+				c = octet & 0x0f
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+			}
+		}
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+// yaml_emitter_write_plain_scalar writes an unquoted scalar. When
+// allow_breaks is set, long lines may be folded at a space once the
+// column exceeds best_width; line breaks in the value are preserved.
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	if len(value) > 0 && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			// Fold the line at this space if it has grown too long, but
+			// never at consecutive spaces (folding would eat one of them).
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			// A single '\n' in the source folds to a blank line in output.
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			//emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+
+	if len(value) > 0 {
+		emitter.whitespace = false
+	}
+	emitter.indention = false
+	if emitter.root_context {
+		// A plain scalar at document root leaves the document open-ended.
+		emitter.open_ended = true
+	}
+
+	return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment_linebreak(emitter, true) {
+ return false
+ }
+ //emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment_linebreak(emitter, true) {
+ return false
+ }
+
+ //emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
+
+func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool {
+ breaks := false
+ pound := false
+ for i := 0; i < len(comment); {
+ if is_break(comment, i) {
+ if !write_break(emitter, comment, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ pound = false
+ } else {
+ if breaks && !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !pound {
+ if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) {
+ return false
+ }
+ pound = true
+ }
+ if !write(emitter, comment, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ if !breaks && !put_break(emitter) {
+ return false
+ }
+
+ emitter.whitespace = true
+ //emitter.indention = true
+ return true
+}
diff --git a/vendor/go.yaml.in/yaml/v3/encode.go b/vendor/go.yaml.in/yaml/v3/encode.go
new file mode 100644
index 0000000000..de9e72a3e6
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/encode.go
@@ -0,0 +1,577 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+ indent int
+ doneInit bool
+}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ if e.indent == 0 {
+ e.indent = 4
+ }
+ e.emitter.best_indent = e.indent
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
+
+func (e *encoder) finish() {
+ e.emitter.open_ended = false
+ yaml_stream_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ var node *Node
+ if in.IsValid() {
+ node, _ = in.Interface().(*Node)
+ }
+ if node != nil && node.Kind == DocumentNode {
+ e.nodev(in)
+ } else {
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ tag = shortTag(tag)
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ switch value := iface.(type) {
+ case *Node:
+ e.nodev(in)
+ return
+ case Node:
+ if !in.CanAddr() {
+ var n = reflect.New(in.Type()).Elem()
+ n.Set(in)
+ in = n
+ }
+ e.nodev(in.Addr())
+ return
+ case time.Time:
+ e.timev(tag, in)
+ return
+ case *time.Time:
+ e.timev(tag, in.Elem())
+ return
+ case time.Duration:
+ e.stringv(tag, reflect.ValueOf(value.String()))
+ return
+ case Marshaler:
+ v, err := value.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ e.marshal(tag, reflect.ValueOf(v))
+ return
+ case encoding.TextMarshaler:
+ text, err := value.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ e.marshal(tag, in.Elem())
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ e.marshal(tag, in.Elem())
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice, reflect.Array:
+ e.slicev(tag, in)
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ e.intv(tag, in)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = e.fieldByIndex(in, info.Inline)
+ if !value.IsValid() {
+ continue
+ }
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quotes strings so that the marshalled output valid for YAML 1.1
+// parsing.
+func isOldBool(s string) (result bool) {
+ switch s {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+ "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ return true
+ default:
+ return false
+ }
+}
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+ }
+ // Note: it's possible for user code to emit invalid YAML
+ // if they explicitly specify a tag and a string containing
+ // text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ if e.flow {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else {
+ style = yaml_LITERAL_SCALAR_STYLE
+ }
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+ // TODO Kill this function. Replace all initialize calls by their underlining Go literals.
+ implicit := tag == ""
+ if !implicit {
+ tag = longTag(tag)
+ }
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.event.head_comment = head
+ e.event.line_comment = line
+ e.event.foot_comment = foot
+ e.event.tail_comment = tail
+ e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+ e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+ // Zero nodes behave as nil.
+ if node.Kind == 0 && node.IsZero() {
+ e.nilv()
+ return
+ }
+
+ // If the tag was not explicitly requested, and dropping it won't change the
+ // implicit tag of the value, don't include it in the presentation.
+ var tag = node.Tag
+ var stag = shortTag(tag)
+ var forceQuoting bool
+ if tag != "" && node.Style&TaggedStyle == 0 {
+ if node.Kind == ScalarNode {
+ if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+ tag = ""
+ } else {
+ rtag, _ := resolve("", node.Value)
+ if rtag == stag {
+ tag = ""
+ } else if stag == strTag {
+ tag = ""
+ forceQuoting = true
+ }
+ }
+ } else {
+ var rtag string
+ switch node.Kind {
+ case MappingNode:
+ rtag = mapTag
+ case SequenceNode:
+ rtag = seqTag
+ }
+ if rtag == stag {
+ tag = ""
+ }
+ }
+ }
+
+ switch node.Kind {
+ case DocumentNode:
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ yaml_document_end_event_initialize(&e.event, true)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case SequenceNode:
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case MappingNode:
+ style := yaml_BLOCK_MAPPING_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+ e.event.tail_comment = []byte(tail)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+
+ // The tail logic below moves the foot comment of prior keys to the following key,
+ // since the value for each key may be a nested structure and the foot needs to be
+ // processed only the entirety of the value is streamed. The last tail is processed
+ // with the mapping end event.
+ var tail string
+ for i := 0; i+1 < len(node.Content); i += 2 {
+ k := node.Content[i]
+ foot := k.FootComment
+ if foot != "" {
+ kopy := *k
+ kopy.FootComment = ""
+ k = &kopy
+ }
+ e.node(k, tail)
+ tail = foot
+
+ v := node.Content[i+1]
+ e.node(v, "")
+ }
+
+ yaml_mapping_end_event_initialize(&e.event)
+ e.event.tail_comment = []byte(tail)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case AliasNode:
+ yaml_alias_event_initialize(&e.event, []byte(node.Value))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case ScalarNode:
+ value := node.Value
+ if !utf8.ValidString(value) {
+ if stag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if stag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", stag)
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ value = encodeBase64(value)
+ }
+
+ style := yaml_PLAIN_SCALAR_STYLE
+ switch {
+ case node.Style&DoubleQuotedStyle != 0:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ case node.Style&SingleQuotedStyle != 0:
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ case node.Style&LiteralStyle != 0:
+ style = yaml_LITERAL_SCALAR_STYLE
+ case node.Style&FoldedStyle != 0:
+ style = yaml_FOLDED_SCALAR_STYLE
+ case strings.Contains(value, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case forceQuoting:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
+ default:
+ failf("cannot encode node with unknown kind %d", node.Kind)
+ }
+}
diff --git a/vendor/go.yaml.in/yaml/v3/parserc.go b/vendor/go.yaml.in/yaml/v3/parserc.go
new file mode 100644
index 0000000000..25fe823637
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/parserc.go
@@ -0,0 +1,1274 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ token := &parser.tokens[parser.tokens_head]
+ yaml_parser_unfold_comments(parser, token)
+ return token
+ }
+ return nil
+}
+
+// yaml_parser_unfold_comments walks through the comments queue and joins all
+// comments behind the position of the provided token into the respective
+// top-level comment slices in the parser.
+func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) {
+ for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index {
+ comment := &parser.comments[parser.comments_head]
+ if len(comment.head) > 0 {
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ // No heads on ends, so keep comment.head for a follow up token.
+ break
+ }
+ if len(parser.head_comment) > 0 {
+ parser.head_comment = append(parser.head_comment, '\n')
+ }
+ parser.head_comment = append(parser.head_comment, comment.head...)
+ }
+ if len(comment.foot) > 0 {
+ if len(parser.foot_comment) > 0 {
+ parser.foot_comment = append(parser.foot_comment, '\n')
+ }
+ parser.foot_comment = append(parser.foot_comment, comment.foot...)
+ }
+ if len(comment.line) > 0 {
+ if len(parser.line_comment) > 0 {
+ parser.line_comment = append(parser.line_comment, '\n')
+ }
+ parser.line_comment = append(parser.line_comment, comment.line...)
+ }
+ *comment = yaml_comment_t{}
+ parser.comments_head++
+ }
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	// peek_token returns nil when the scanner failed; the error is already
+	// recorded on the parser, so just propagate failure.
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	// The very first token of any stream must be STREAM-START.
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+//
+// *
+//
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	// An implicit document has no directives and no explicit "---" marker:
+	// the next token is already the root node's content.
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		var head_comment []byte
+		if len(parser.head_comment) > 0 {
+			// [Go] Scan the header comment backwards, and if an empty line is found, break
+			// the header so the part before the last empty line goes into the
+			// document header, while the bottom of it goes into a follow up event.
+			for i := len(parser.head_comment) - 1; i > 0; i-- {
+				if parser.head_comment[i] == '\n' {
+					if i == len(parser.head_comment)-1 {
+						// Only a trailing newline: strip it from the header.
+						head_comment = parser.head_comment[:i]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					} else if parser.head_comment[i-1] == '\n' {
+						// Found a blank line: split the comment there.
+						head_comment = parser.head_comment[:i-1]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					}
+				}
+			}
+		}
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+
+			head_comment: head_comment,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document: directives followed by "---".
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected ", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// ***********
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// A directive, document boundary, or stream end here means the document
+	// body is empty: pop the saved state and emit an empty scalar instead of
+	// a real node.
+	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+		token.typ == yaml_DOCUMENT_START_TOKEN ||
+		token.typ == yaml_DOCUMENT_END_TOKEN ||
+		token.typ == yaml_STREAM_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		return yaml_parser_process_empty_scalar(parser, event,
+			token.start_mark)
+	}
+	return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+//
+// *************
+//
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	// The end is implicit unless an explicit "..." (DOCUMENT-END) token is present.
+	implicit := true
+	if token.typ == yaml_DOCUMENT_END_TOKEN {
+		end_mark = token.end_mark
+		skip_token(parser)
+		implicit = false
+	}
+
+	// %TAG directives are scoped to a single document; reset them here.
+	parser.tag_directives = parser.tag_directives[:0]
+
+	parser.state = yaml_PARSE_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_DOCUMENT_END_EVENT,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		implicit:   implicit,
+	}
+	yaml_parser_set_event_comments(parser, event)
+	// A comment attached as a header here actually trails the document, so
+	// reclassify it as a foot comment when no foot comment exists yet.
+	if len(event.head_comment) > 0 && len(event.foot_comment) == 0 {
+		event.foot_comment = event.head_comment
+		event.head_comment = nil
+	}
+	return true
+}
+
+// yaml_parser_set_event_comments moves the comment text accumulated by the
+// scanner onto the given event, and clears every parser comment buffer so the
+// same comments are not attached to a later event as well.
+func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) {
+	event.head_comment = parser.head_comment
+	event.line_comment = parser.line_comment
+	event.foot_comment = parser.foot_comment
+	parser.head_comment = nil
+	parser.line_comment = nil
+	parser.foot_comment = nil
+	parser.tail_comment = nil
+	parser.stem_comment = nil
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+//
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+//
+// block_node ::= ALIAS
+//
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+//
+// flow_node ::= ALIAS
+//
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+//
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+//
+// *************************
+//
+// block_content ::= block_collection | flow_collection | SCALAR
+//
+// ******
+//
+// flow_content ::= flow_collection | SCALAR
+//
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// An alias is a complete node on its own: emit it and pop the saved state.
+	if token.typ == yaml_ALIAS_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		*event = yaml_event_t{
+			typ:        yaml_ALIAS_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+			anchor:     token.value,
+		}
+		yaml_parser_set_event_comments(parser, event)
+		skip_token(parser)
+		return true
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	// Node properties (anchor and tag) may appear in either order before the
+	// node content; consume whichever combination is present.
+	var tag_token bool
+	var tag_handle, tag_suffix, anchor []byte
+	var tag_mark yaml_mark_t
+	if token.typ == yaml_ANCHOR_TOKEN {
+		anchor = token.value
+		start_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_TAG_TOKEN {
+			tag_token = true
+			tag_handle = token.value
+			tag_suffix = token.suffix
+			tag_mark = token.start_mark
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	} else if token.typ == yaml_TAG_TOKEN {
+		tag_token = true
+		tag_handle = token.value
+		tag_suffix = token.suffix
+		start_mark = token.start_mark
+		tag_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_ANCHOR_TOKEN {
+			anchor = token.value
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	// Resolve the tag handle against the %TAG directives currently in scope
+	// (including the built-in defaults appended by process_directives).
+	var tag []byte
+	if tag_token {
+		if len(tag_handle) == 0 {
+			// A verbatim tag has no handle; the suffix is the full tag.
+			tag = tag_suffix
+			tag_suffix = nil
+		} else {
+			for i := range parser.tag_directives {
+				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+					tag = append(tag, tag_suffix...)
+					break
+				}
+			}
+			if len(tag) == 0 {
+				yaml_parser_set_parser_error_context(parser,
+					"while parsing a node", start_mark,
+					"found undefined tag handle", tag_mark)
+				return false
+			}
+		}
+	}
+
+	// With no explicit tag the node's type is implicit (resolved later).
+	implicit := len(tag) == 0
+	if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if token.typ == yaml_SCALAR_TOKEN {
+		// plain_implicit/quoted_implicit tell the loader whether the scalar's
+		// type may be resolved from its content or must stay a plain string.
+		var plain_implicit, quoted_implicit bool
+		end_mark = token.end_mark
+		if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+			plain_implicit = true
+		} else if len(tag) == 0 {
+			quoted_implicit = true
+		}
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			value:           token.value,
+			implicit:        plain_implicit,
+			quoted_implicit: quoted_implicit,
+			style:           yaml_style_t(token.style),
+		}
+		yaml_parser_set_event_comments(parser, event)
+		skip_token(parser)
+		return true
+	}
+	if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+		// [Go] Some of the events below can be merged as they differ only on style.
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+		}
+		yaml_parser_set_event_comments(parser, event)
+		return true
+	}
+	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+		}
+		yaml_parser_set_event_comments(parser, event)
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		// A stem comment set aside by split_stem_comment belongs to this
+		// collection as a whole; attach it as the head comment.
+		if parser.stem_comment != nil {
+			event.head_comment = parser.stem_comment
+			parser.stem_comment = nil
+		}
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+		}
+		if parser.stem_comment != nil {
+			event.head_comment = parser.stem_comment
+			parser.stem_comment = nil
+		}
+		return true
+	}
+	// Properties with no following content stand for an empty scalar node.
+	if len(anchor) > 0 || len(tag) > 0 {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			implicit:        implicit,
+			quoted_implicit: false,
+			style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+		}
+		return true
+	}
+
+	context := "while parsing a flow node"
+	if block {
+		context = "while parsing a block node"
+	}
+	yaml_parser_set_parser_error_context(parser, context, start_mark,
+		"did not find expected node content", token.start_mark)
+	return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+//
+// ******************** *********** * *********
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	// On the first call, record the collection's start mark (for error
+	// context) and consume the BLOCK-SEQUENCE-START token.
+	if first {
+		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		prior_head_len := len(parser.head_comment)
+		skip_token(parser)
+		yaml_parser_split_stem_comment(parser, prior_head_len)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		} else {
+			// "-" followed by another "-" or the block end: empty entry.
+			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	}
+	if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block collection", context_mark,
+		"did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+//
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		prior_head_len := len(parser.head_comment)
+		skip_token(parser)
+		yaml_parser_split_stem_comment(parser, prior_head_len)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		// KEY/VALUE also terminate the entry because an indentless sequence
+		// sits directly inside a block mapping.
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+			token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		}
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	// No further "-": the indentless sequence ends without consuming a token.
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
+	}
+	return true
+}
+
+// Split stem comment from head comment.
+//
+// When a sequence or map is found under a sequence entry, the former head comment
+// is assigned to the underlying sequence or map as a whole, not the individual
+// sequence or map entry as would be expected otherwise. To handle this case the
+// previous head comment is moved aside as the stem comment.
+func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
+	// stem_len is the length of the head comment before the "-" was consumed;
+	// zero means there is nothing to move aside.
+	if stem_len == 0 {
+		return
+	}
+
+	// Only applies when the entry's value is itself a block collection.
+	token := peek_token(parser)
+	if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
+		return
+	}
+
+	parser.stem_comment = parser.head_comment[:stem_len]
+	if len(parser.head_comment) == stem_len {
+		parser.head_comment = nil
+	} else {
+		// Copy suffix to prevent very strange bugs if someone ever appends
+		// further bytes to the prefix in the stem_comment slice above.
+		// stem_len+1 skips the byte at stem_len — presumably the newline
+		// separating the stem from the remainder (TODO confirm against the
+		// scanner's comment assembly).
+		parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...)
+	}
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	// On the first call, record the mapping's start mark and consume the
+	// BLOCK-MAPPING-START token.
+	if first {
+		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// [Go] A tail comment was left from the prior mapping value processed. Emit an event
+	// as it needs to be processed with that value and not the following key.
+	if len(parser.tail_comment) > 0 {
+		*event = yaml_event_t{
+			typ:          yaml_TAIL_COMMENT_EVENT,
+			start_mark:   token.start_mark,
+			end_mark:     token.end_mark,
+			foot_comment: parser.tail_comment,
+		}
+		parser.tail_comment = nil
+		return true
+	}
+
+	if token.typ == yaml_KEY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		} else {
+			// "?" with no following node: the key is an empty scalar.
+			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	} else if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		yaml_parser_set_event_comments(parser, event)
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block mapping", context_mark,
+		"did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		}
+		// ":" with no following node: the value is an empty scalar.
+		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	// No ":" at all: the key had no value; emit an empty scalar in its place.
+	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+//
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+//
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//
+// *
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	// On the first call, record the sequence's start mark and consume the
+	// FLOW-SEQUENCE-START ("[") token.
+	if first {
+		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		// Entries after the first must be separated by a ",".
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow sequence", context_mark,
+					"did not find expected ',' or ']'", token.start_mark)
+			}
+		}
+
+		// A "?" inside a flow sequence starts a single-pair implicit mapping.
+		if token.typ == yaml_KEY_TOKEN {
+			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+			*event = yaml_event_t{
+				typ:        yaml_MAPPING_START_EVENT,
+				start_mark: token.start_mark,
+				end_mark:   token.end_mark,
+				implicit:   true,
+				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+			}
+			skip_token(parser)
+			return true
+		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+	yaml_parser_set_event_comments(parser, event)
+
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//
+// *** *
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_VALUE_TOKEN &&
+		token.typ != yaml_FLOW_ENTRY_TOKEN &&
+		token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+		return yaml_parser_parse_node(parser, event, false, false)
+	}
+	// No key node follows the "?": the key is an empty scalar.
+	mark := token.end_mark
+	skip_token(parser)
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+	return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//
+// ***** *
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		// Note: this inner token deliberately shadows the outer one; on the
+		// empty-value fallthrough below, the outer VALUE token's start mark
+		// is used for the empty scalar.
+		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//
+// *
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	// The single-pair mapping ends without consuming a token; control returns
+	// to the enclosing flow sequence.
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
+	}
+	return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+//
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+//
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// - *** *
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		// NOTE(review): unlike the other *_entry/_key parsers above, this
+		// branch does not nil-check peek_token before reading
+		// token.start_mark — a scanner failure here would panic. This is
+		// vendored upstream code; confirm against go.yaml.in/yaml/v3 before
+		// changing.
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+		// Entries after the first must be separated by a ",".
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow mapping", context_mark,
+					"did not find expected ',' or '}'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+			if token.typ != yaml_VALUE_TOKEN &&
+				token.typ != yaml_FLOW_ENTRY_TOKEN &&
+				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+				return yaml_parser_parse_node(parser, event, false, false)
+			} else {
+				// "?" with no following node: the key is an empty scalar.
+				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+			}
+		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			// A bare node serves as the key; its value slot will be empty.
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+	yaml_parser_set_event_comments(parser, event)
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// - ***** *
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	// empty is true when the key was a bare node with no ":" expected
+	// (FLOW_MAPPING_EMPTY_VALUE state): the value is always an empty scalar.
+	if empty {
+		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+	// Emit a zero-width plain scalar at mark; used wherever the grammar
+	// allows a node to be omitted. Always succeeds.
+	*event = yaml_event_t{
+		typ:        yaml_SCALAR_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+		value:      nil, // Empty
+		implicit:   true,
+		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+	}
+	return true
+}
+
+// default_tag_directives are the tag handles implicitly in effect for every
+// document: "!" expands to itself and "!!" expands to the standard YAML tag
+// prefix.
+var default_tag_directives = []yaml_tag_directive_t{
+	{[]byte("!"), []byte("!")},
+	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+// Parse directives.
+// Consumes leading %YAML/%TAG directive tokens, records the %TAG handles on
+// the parser (plus the built-in defaults), and optionally returns the parsed
+// directives through the two ref parameters (either may be nil).
+func yaml_parser_process_directives(parser *yaml_parser_t,
+	version_directive_ref **yaml_version_directive_t,
+	tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+	var version_directive *yaml_version_directive_t
+	var tag_directives []yaml_tag_directive_t
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+			// At most one %YAML directive per document.
+			if version_directive != nil {
+				yaml_parser_set_parser_error(parser,
+					"found duplicate %YAML directive", token.start_mark)
+				return false
+			}
+			// Only version 1.1 is accepted.
+			if token.major != 1 || token.minor != 1 {
+				yaml_parser_set_parser_error(parser,
+					"found incompatible YAML document", token.start_mark)
+				return false
+			}
+			version_directive = &yaml_version_directive_t{
+				major: token.major,
+				minor: token.minor,
+			}
+		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+			value := yaml_tag_directive_t{
+				handle: token.value,
+				prefix: token.prefix,
+			}
+			// Duplicate %TAG handles are an error (allow_duplicates=false).
+			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+				return false
+			}
+			tag_directives = append(tag_directives, value)
+		}
+
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+	}
+
+	// Append the built-in handles, unless the document overrode them.
+	for i := range default_tag_directives {
+		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+			return false
+		}
+	}
+
+	if version_directive_ref != nil {
+		*version_directive_ref = version_directive
+	}
+	if tag_directives_ref != nil {
+		*tag_directives_ref = tag_directives
+	}
+	return true
+}
+
+// Append a tag directive to the directives stack.
+// Append a tag directive to the directives stack.
+// When allow_duplicates is true (used for the built-in defaults), an existing
+// handle silently wins; otherwise a duplicate is a parse error at mark.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+	for i := range parser.tag_directives {
+		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+		}
+	}
+
+	// [Go] I suspect the copy is unnecessary. This was likely done
+	// because there was no way to track ownership of the data.
+	value_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(value_copy.handle, value.handle)
+	copy(value_copy.prefix, value.prefix)
+	parser.tag_directives = append(parser.tag_directives, value_copy)
+	return true
+}
diff --git a/vendor/go.yaml.in/yaml/v3/readerc.go b/vendor/go.yaml.in/yaml/v3/readerc.go
new file mode 100644
index 0000000000..56af245366
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/readerc.go
@@ -0,0 +1,434 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return 0.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
// Byte order marks used to sniff the input stream encoding.
const (
	bom_UTF8    = "\xef\xbb\xbf" // UTF-8 BOM (RFC 3629)
	bom_UTF16LE = "\xff\xfe"     // UTF-16 little-endian BOM (RFC 2781)
	bom_UTF16BE = "\xfe\xff"     // UTF-16 big-endian BOM (RFC 2781)
)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we had enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
+
// Update the raw buffer.
//
// Compacts any unread bytes to the front of parser.raw_buffer and fills the
// remaining capacity from parser.read_handler. Sets parser.eof on io.EOF;
// any other read error is recorded via yaml_parser_set_reader_error and
// reported as false.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
	size_read := 0

	// Return if the raw buffer is full.
	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
		return true
	}

	// Return on EOF.
	if parser.eof {
		return true
	}

	// Move the remaining bytes in the raw buffer to the beginning.
	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
	}
	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
	parser.raw_buffer_pos = 0

	// Call the read handler to fill the buffer.
	// Note: := re-uses size_read from above; it is valid because err is new.
	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
	if err == io.EOF {
		parser.eof = true
	} else if err != nil {
		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
	}
	return true
}
+
// Ensure that the buffer contains at least `length` characters.
// Return true on success, false on failure.
//
// The length is supposed to be significantly less than the buffer size.
//
// Characters are decoded from the raw input encoding (UTF-8 or UTF-16) and
// re-encoded as UTF-8 into parser.buffer; parser.unread counts decoded
// characters, not bytes. At EOF the buffer is padded with NUL bytes so the
// requested length is always available to callers.
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
	if parser.read_handler == nil {
		panic("read handler must be set")
	}

	// [Go] This function was changed to guarantee the requested length size at EOF.
	// The fact we need to do this is pretty awful, but the description above implies
	// for that to be the case, and there are tests

	// If the EOF flag is set and the raw buffer is empty, do nothing.
	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
		// [Go] ACTUALLY! Read the documentation of this function above.
		// This is just broken. To return true, we need to have the
		// given length in the buffer. Not doing that means every single
		// check that calls this function to make sure the buffer has a
		// given length is Go) panicking; or C) accessing invalid memory.
		//return true
	}

	// Return if the buffer contains enough characters.
	if parser.unread >= length {
		return true
	}

	// Determine the input encoding if it is not known yet.
	if parser.encoding == yaml_ANY_ENCODING {
		if !yaml_parser_determine_encoding(parser) {
			return false
		}
	}

	// Move the unread characters to the beginning of the buffer.
	buffer_len := len(parser.buffer)
	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
		buffer_len -= parser.buffer_pos
		parser.buffer_pos = 0
	} else if parser.buffer_pos == buffer_len {
		buffer_len = 0
		parser.buffer_pos = 0
	}

	// Open the whole buffer for writing, and cut it before returning.
	parser.buffer = parser.buffer[:cap(parser.buffer)]

	// Fill the buffer until it has enough characters.
	first := true
	for parser.unread < length {

		// Fill the raw buffer if necessary.
		// On the first pass bytes may still remain in the raw buffer, so a
		// read is only forced when it is exhausted; later passes always read.
		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
			if !yaml_parser_update_raw_buffer(parser) {
				parser.buffer = parser.buffer[:buffer_len]
				return false
			}
		}
		first = false

		// Decode the raw buffer.
	inner:
		for parser.raw_buffer_pos != len(parser.raw_buffer) {
			var value rune
			var width int

			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos

			// Decode the next character.
			switch parser.encoding {
			case yaml_UTF8_ENCODING:
				// Decode a UTF-8 character.  Check RFC 3629
				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
				//
				// The following table (taken from the RFC) is used for
				// decoding.
				//
				//    Char. number range |        UTF-8 octet sequence
				//      (hexadecimal)    |              (binary)
				//   --------------------+------------------------------------
				//   0000 0000-0000 007F | 0xxxxxxx
				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
				//
				// Additionally, the characters in the range 0xD800-0xDFFF
				// are prohibited as they are reserved for use with UTF-16
				// surrogate pairs.

				// Determine the length of the UTF-8 sequence.
				octet := parser.raw_buffer[parser.raw_buffer_pos]
				switch {
				case octet&0x80 == 0x00:
					width = 1
				case octet&0xE0 == 0xC0:
					width = 2
				case octet&0xF0 == 0xE0:
					width = 3
				case octet&0xF8 == 0xF0:
					width = 4
				default:
					// The leading octet is invalid.
					return yaml_parser_set_reader_error(parser,
						"invalid leading UTF-8 octet",
						parser.offset, int(octet))
				}

				// Check if the raw buffer contains an incomplete character.
				if width > raw_unread {
					if parser.eof {
						return yaml_parser_set_reader_error(parser,
							"incomplete UTF-8 octet sequence",
							parser.offset, -1)
					}
					break inner
				}

				// Decode the leading octet.
				switch {
				case octet&0x80 == 0x00:
					value = rune(octet & 0x7F)
				case octet&0xE0 == 0xC0:
					value = rune(octet & 0x1F)
				case octet&0xF0 == 0xE0:
					value = rune(octet & 0x0F)
				case octet&0xF8 == 0xF0:
					value = rune(octet & 0x07)
				default:
					value = 0
				}

				// Check and decode the trailing octets.
				for k := 1; k < width; k++ {
					octet = parser.raw_buffer[parser.raw_buffer_pos+k]

					// Check if the octet is valid.
					if (octet & 0xC0) != 0x80 {
						return yaml_parser_set_reader_error(parser,
							"invalid trailing UTF-8 octet",
							parser.offset+k, int(octet))
					}

					// Decode the octet.
					value = (value << 6) + rune(octet&0x3F)
				}

				// Check the length of the sequence against the value
				// (rejects overlong encodings).
				switch {
				case width == 1:
				case width == 2 && value >= 0x80:
				case width == 3 && value >= 0x800:
				case width == 4 && value >= 0x10000:
				default:
					return yaml_parser_set_reader_error(parser,
						"invalid length of a UTF-8 sequence",
						parser.offset, -1)
				}

				// Check the range of the value.
				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
					return yaml_parser_set_reader_error(parser,
						"invalid Unicode character",
						parser.offset, int(value))
				}

			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
				var low, high int
				if parser.encoding == yaml_UTF16LE_ENCODING {
					low, high = 0, 1
				} else {
					low, high = 1, 0
				}

				// The UTF-16 encoding is not as simple as one might
				// naively think.  Check RFC 2781
				// (http://www.ietf.org/rfc/rfc2781.txt).
				//
				// Normally, two subsequent bytes describe a Unicode
				// character.  However a special technique (called a
				// surrogate pair) is used for specifying character
				// values larger than 0xFFFF.
				//
				// A surrogate pair consists of two pseudo-characters:
				//      high surrogate area (0xD800-0xDBFF)
				//      low surrogate area (0xDC00-0xDFFF)
				//
				// The following formulas are used for decoding
				// and encoding characters using surrogate pairs:
				//
				//      U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
				//      U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
				//      W1 = 110110yyyyyyyyyy
				//      W2 = 110111xxxxxxxxxx
				//
				// where U is the character value, W1 is the high surrogate
				// area, W2 is the low surrogate area.

				// Check for incomplete UTF-16 character.
				if raw_unread < 2 {
					if parser.eof {
						return yaml_parser_set_reader_error(parser,
							"incomplete UTF-16 character",
							parser.offset, -1)
					}
					break inner
				}

				// Get the character.
				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)

				// Check for unexpected low surrogate area.
				if value&0xFC00 == 0xDC00 {
					return yaml_parser_set_reader_error(parser,
						"unexpected low surrogate area",
						parser.offset, int(value))
				}

				// Check for a high surrogate area.
				if value&0xFC00 == 0xD800 {
					width = 4

					// Check for incomplete surrogate pair.
					if raw_unread < 4 {
						if parser.eof {
							return yaml_parser_set_reader_error(parser,
								"incomplete UTF-16 surrogate pair",
								parser.offset, -1)
						}
						break inner
					}

					// Get the next character.
					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)

					// Check for a low surrogate area.
					if value2&0xFC00 != 0xDC00 {
						return yaml_parser_set_reader_error(parser,
							"expected low surrogate area",
							parser.offset+2, int(value2))
					}

					// Generate the value of the surrogate pair.
					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
				} else {
					width = 2
				}

			default:
				panic("impossible")
			}

			// Check if the character is in the allowed range:
			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
			//      | [#x10000-#x10FFFF]                        (32 bit)
			switch {
			case value == 0x09:
			case value == 0x0A:
			case value == 0x0D:
			case value >= 0x20 && value <= 0x7E:
			case value == 0x85:
			case value >= 0xA0 && value <= 0xD7FF:
			case value >= 0xE000 && value <= 0xFFFD:
			case value >= 0x10000 && value <= 0x10FFFF:
			default:
				return yaml_parser_set_reader_error(parser,
					"control characters are not allowed",
					parser.offset, int(value))
			}

			// Move the raw pointers.
			parser.raw_buffer_pos += width
			parser.offset += width

			// Finally put the character into the buffer (re-encoded as UTF-8).
			if value <= 0x7F {
				// 0000 0000-0000 007F . 0xxxxxxx
				parser.buffer[buffer_len+0] = byte(value)
				buffer_len += 1
			} else if value <= 0x7FF {
				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
				buffer_len += 2
			} else if value <= 0xFFFF {
				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
				buffer_len += 3
			} else {
				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
				buffer_len += 4
			}

			parser.unread++
		}

		// On EOF, put NUL into the buffer and return.
		if parser.eof {
			parser.buffer[buffer_len] = 0
			buffer_len++
			parser.unread++
			break
		}
	}
	// [Go] Read the documentation of this function above. To return true,
	// we need to have the given length in the buffer. Not doing that means
	// every single check that calls this function to make sure the buffer
	// has a given length is Go) panicking; or C) accessing invalid memory.
	// This happens here due to the EOF above breaking early.
	for buffer_len < length {
		parser.buffer[buffer_len] = 0
		buffer_len++
	}
	parser.buffer = parser.buffer[:buffer_len]
	return true
}
diff --git a/vendor/go.yaml.in/yaml/v3/resolve.go b/vendor/go.yaml.in/yaml/v3/resolve.go
new file mode 100644
index 0000000000..64ae888057
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/resolve.go
@@ -0,0 +1,326 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
// resolveMapItem pairs a decoded Go value with the short YAML tag it
// resolves to (used for the fixed-literal lookup in resolveMap).
type resolveMapItem struct {
	value interface{}
	tag   string
}

// resolveTable maps the first byte of a scalar to a one-letter hint class
// ('S' sign, 'D' digit, 'M' map-literal candidate, '.' float); 0 means no hint.
var resolveTable = make([]byte, 256)

// resolveMap maps exact scalar literals (e.g. "true", "~", ".inf") to their
// resolved value and tag.
var resolveMap = make(map[string]resolveMapItem)

func init() {
	t := resolveTable
	t[int('+')] = 'S' // Sign
	t[int('-')] = 'S'
	for _, c := range "0123456789" {
		t[int(c)] = 'D' // Digit
	}
	for _, c := range "yYnNtTfFoO~" {
		t[int(c)] = 'M' // In map
	}
	t[int('.')] = '.' // Float (potentially in map)

	var resolveMapList = []struct {
		v   interface{}
		tag string
		l   []string
	}{
		{true, boolTag, []string{"true", "True", "TRUE"}},
		{false, boolTag, []string{"false", "False", "FALSE"}},
		{nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
		{math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
		{math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
		{math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
		{math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
		{"<<", mergeTag, []string{"<<"}},
	}

	m := resolveMap
	for _, item := range resolveMapList {
		for _, s := range item.l {
			m[s] = resolveMapItem{item.v, item.tag}
		}
	}
}
+
// Short forms of the standard YAML tags. The long form replaces the leading
// "!!" with longTagPrefix (e.g. "!!str" <-> "tag:yaml.org,2002:str").
const (
	nullTag      = "!!null"
	boolTag      = "!!bool"
	strTag       = "!!str"
	intTag       = "!!int"
	floatTag     = "!!float"
	timestampTag = "!!timestamp"
	seqTag       = "!!seq"
	mapTag       = "!!map"
	binaryTag    = "!!binary"
	mergeTag     = "!!merge"
)

// longTags maps each known short tag to its long form; shortTags holds the
// inverse mapping. Both are populated once at package init time.
var longTags = make(map[string]string)
var shortTags = make(map[string]string)

func init() {
	known := []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag}
	for _, stag := range known {
		ltag := longTag(stag)
		longTags[stag] = ltag
		shortTags[ltag] = stag
	}
}

const longTagPrefix = "tag:yaml.org,2002:"

// shortTag converts a long-form "tag:yaml.org,2002:xxx" tag to its "!!xxx"
// short form; any tag without the standard prefix is returned unchanged.
func shortTag(tag string) string {
	if !strings.HasPrefix(tag, longTagPrefix) {
		return tag
	}
	if stag, ok := shortTags[tag]; ok {
		return stag
	}
	return "!!" + tag[len(longTagPrefix):]
}

// longTag converts a "!!xxx" short tag to its long form; any other tag is
// returned unchanged.
func longTag(tag string) string {
	if !strings.HasPrefix(tag, "!!") {
		return tag
	}
	if ltag, ok := longTags[tag]; ok {
		return ltag
	}
	return longTagPrefix + tag[2:]
}

// resolvableTag reports whether resolve knows how to interpret scalars
// carrying the given short tag (the empty tag means "untagged").
func resolvableTag(tag string) bool {
	switch tag {
	case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
		return true
	default:
		return false
	}
}
+
// yamlStyleFloat matches plain scalars that look like YAML floats
// (optional sign, digits with optional fraction, optional exponent).
var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)

// resolve guesses the type of the plain scalar `in` and returns the effective
// short tag together with the decoded Go value. When `tag` is non-empty the
// result must be compatible with it; otherwise the deferred check calls failf
// (defined elsewhere in this package).
func resolve(tag string, in string) (rtag string, out interface{}) {
	tag = shortTag(tag)
	if !resolvableTag(tag) {
		return tag, in
	}

	defer func() {
		// The deferred check allows an int result when a float was requested
		// (promoting it), and otherwise fails on a tag mismatch.
		switch tag {
		case "", rtag, strTag, binaryTag:
			return
		case floatTag:
			if rtag == intTag {
				switch v := out.(type) {
				case int64:
					rtag = floatTag
					out = float64(v)
					return
				case int:
					rtag = floatTag
					out = float64(v)
					return
				}
			}
		}
		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
	}()

	// Any data is accepted as a !!str or !!binary.
	// Otherwise, the prefix is enough of a hint about what it might be.
	hint := byte('N')
	if in != "" {
		hint = resolveTable[in[0]]
	}
	if hint != 0 && tag != strTag && tag != binaryTag {
		// Handle things we can lookup in a map.
		if item, ok := resolveMap[in]; ok {
			return item.tag, item.value
		}

		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
		// are purposefully unsupported here. They're still quoted on
		// the way out for compatibility with other parser, though.

		switch hint {
		case 'M':
			// We've already checked the map above.

		case '.':
			// Not in the map, so maybe a normal float.
			floatv, err := strconv.ParseFloat(in, 64)
			if err == nil {
				return floatTag, floatv
			}

		case 'D', 'S':
			// Int, float, or timestamp.
			// Only try values as a timestamp if the value is unquoted or there's an explicit
			// !!timestamp tag.
			if tag == "" || tag == timestampTag {
				t, ok := parseTimestamp(in)
				if ok {
					return timestampTag, t
				}
			}

			// Underscores are legal digit separators in YAML 1.1 numbers.
			plain := strings.Replace(in, "_", "", -1)
			intv, err := strconv.ParseInt(plain, 0, 64)
			if err == nil {
				if intv == int64(int(intv)) {
					return intTag, int(intv)
				} else {
					return intTag, intv
				}
			}
			uintv, err := strconv.ParseUint(plain, 0, 64)
			if err == nil {
				return intTag, uintv
			}
			if yamlStyleFloat.MatchString(plain) {
				floatv, err := strconv.ParseFloat(plain, 64)
				if err == nil {
					return floatTag, floatv
				}
			}
			if strings.HasPrefix(plain, "0b") {
				intv, err := strconv.ParseInt(plain[2:], 2, 64)
				if err == nil {
					if intv == int64(int(intv)) {
						return intTag, int(intv)
					} else {
						return intTag, intv
					}
				}
				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
				if err == nil {
					return intTag, uintv
				}
			} else if strings.HasPrefix(plain, "-0b") {
				intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
				if err == nil {
					// NOTE(review): "true ||" forces the int branch for
					// negative binary literals; kept as-is to preserve
					// longstanding behavior.
					if true || intv == int64(int(intv)) {
						return intTag, int(intv)
					} else {
						return intTag, intv
					}
				}
			}
			// Octals as introduced in version 1.2 of the spec.
			// Octals from the 1.1 spec, spelled as 0777, are still
			// decoded by default in v3 as well for compatibility.
			// May be dropped in v4 depending on how usage evolves.
			if strings.HasPrefix(plain, "0o") {
				intv, err := strconv.ParseInt(plain[2:], 8, 64)
				if err == nil {
					if intv == int64(int(intv)) {
						return intTag, int(intv)
					} else {
						return intTag, intv
					}
				}
				uintv, err := strconv.ParseUint(plain[2:], 8, 64)
				if err == nil {
					return intTag, uintv
				}
			} else if strings.HasPrefix(plain, "-0o") {
				intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
				if err == nil {
					// NOTE(review): same "true ||" quirk as the -0b case
					// above; kept as-is.
					if true || intv == int64(int(intv)) {
						return intTag, int(intv)
					} else {
						return intTag, intv
					}
				}
			}
		default:
			panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
		}
	}
	return strTag, in
}
+
// encodeBase64 encodes s as base64, broken into 70-column chunks. When the
// output spans more than one chunk, every chunk — including the last — is
// followed by a newline; a single short chunk is returned without one.
func encodeBase64(s string) string {
	const lineLen = 70
	encoded := base64.StdEncoding.EncodeToString([]byte(s))
	if len(encoded) < lineLen {
		// Single line: no wrapping, no trailing newline.
		return encoded
	}
	var b strings.Builder
	b.Grow(len(encoded) + len(encoded)/lineLen + 1)
	for start := 0; start < len(encoded); start += lineLen {
		end := start + lineLen
		if end > len(encoded) {
			end = len(encoded)
		}
		b.WriteString(encoded[start:end])
		b.WriteByte('\n')
	}
	return b.String()
}
+
// Layouts accepted by parseTimestamp — a subset of the formats allowed by the
// regular expression defined at http://yaml.org/type/timestamp.html.
var allowedTimestampFormats = []string{
	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
	"2006-1-2",                        // date only
	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
	// from the set of examples.
}

// parseTimestamp parses s as a timestamp string and
// returns the timestamp and reports whether it succeeded.
// Timestamp formats are defined at http://yaml.org/type/timestamp.html
func parseTimestamp(s string) (time.Time, bool) {
	// TODO write code to check all the formats supported by
	// http://yaml.org/type/timestamp.html instead of using time.Parse.

	// Quick rejection: every accepted layout begins with a four-digit year
	// followed by '-', so avoid paying for time.Parse otherwise.
	digits := 0
	for digits < len(s) && s[digits] >= '0' && s[digits] <= '9' {
		digits++
	}
	if digits != 4 || digits == len(s) || s[digits] != '-' {
		return time.Time{}, false
	}
	for _, layout := range allowedTimestampFormats {
		if t, err := time.Parse(layout, s); err == nil {
			return t, true
		}
	}
	return time.Time{}, false
}
diff --git a/vendor/go.yaml.in/yaml/v3/scannerc.go b/vendor/go.yaml.in/yaml/v3/scannerc.go
new file mode 100644
index 0000000000..30b1f08920
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/scannerc.go
@@ -0,0 +1,3040 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided on two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// parser transform the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descendant parser (or,
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two issues of Scanning that might be called "clever", the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in details.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tag, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrate the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denote indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require starting a new block collection from a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included into a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
+// cache ensures that the parser buffer holds at least length unread
+// characters, pulling more input from the reader when it does not.
+// It reports false on a reader or memory error.
+func cache(parser *yaml_parser_t, length int) bool {
+	// [Go] Inlined form of the C pattern: !cache(A, B) -> unread < B && !update(A, B)
+	if parser.unread >= length {
+		return true
+	}
+	return yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer past one character, updating the mark
+// (index/column) and the unread count. The caller must have ensured the
+// buffer holds a complete character (see cache).
+func skip(parser *yaml_parser_t) {
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		// A non-blank character resets the consecutive-newline counter.
+		parser.newlines = 0
+	}
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	// Multi-byte UTF-8 sequences advance by their full width.
+	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+// skip_line advances past a line break, resetting the column and bumping
+// the line and newline counters. A no-op if the current character is not a
+// break.
+func skip_line(parser *yaml_parser_t) {
+	if is_crlf(parser.buffer, parser.buffer_pos) {
+		// CR LF consumes two bytes but counts as a single line break.
+		parser.mark.index += 2
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread -= 2
+		parser.buffer_pos += 2
+		parser.newlines++
+	} else if is_break(parser.buffer, parser.buffer_pos) {
+		parser.mark.index++
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread--
+		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+		parser.newlines++
+	}
+}
+
+// Copy a character to a string buffer and advance pointers.
+// Multi-byte UTF-8 sequences are copied whole; a zero-width character
+// indicates corrupt input and panics.
+func read(parser *yaml_parser_t, s []byte) []byte {
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		parser.newlines = 0
+	}
+	w := width(parser.buffer[parser.buffer_pos])
+	if w == 0 {
+		panic("invalid character sequence")
+	}
+	if len(s) == 0 {
+		s = make([]byte, 0, 32)
+	}
+	if w == 1 && len(s)+w <= cap(s) {
+		// Fast path: one byte with spare capacity, skip append overhead.
+		s = s[:len(s)+1]
+		s[len(s)-1] = parser.buffer[parser.buffer_pos]
+		parser.buffer_pos++
+	} else {
+		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+		parser.buffer_pos += w
+	}
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+// CR LF and NEL are normalized to '\n'; LS/PS are copied verbatim.
+// A non-break character leaves the parser untouched.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+	buf := parser.buffer
+	pos := parser.buffer_pos
+	switch {
+	case buf[pos] == '\r' && buf[pos+1] == '\n':
+		// CR LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+		// CRLF spans two characters: one index/unread update here, the
+		// second happens in the shared updates after the switch.
+		parser.mark.index++
+		parser.unread--
+	case buf[pos] == '\r' || buf[pos] == '\n':
+		// CR|LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 1
+	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+		// NEL . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+		// LS|PS . LS|PS
+		s = append(s, buf[parser.buffer_pos:pos+3]...)
+		parser.buffer_pos += 3
+	default:
+		return s
+	}
+	parser.mark.index++
+	parser.mark.column = 0
+	parser.mark.line++
+	parser.unread--
+	parser.newlines++
+	return s
+}
+
+// Get the next token from the token queue, fetching more tokens from the
+// input if the queue does not hold enough. Returns false only on a fetch
+// error; after STREAM-END or a prior error it returns true with an empty
+// token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Erase the token object.
+	*token = yaml_token_t{} // [Go] Is this necessary?
+
+	// No tokens after STREAM-END or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+		return true
+	}
+
+	// Ensure that the tokens queue contains enough tokens.
+	if !parser.token_available {
+		if !yaml_parser_fetch_more_tokens(parser) {
+			return false
+		}
+	}
+
+	// Fetch the next token from the queue.
+	*token = parser.tokens[parser.tokens_head]
+	parser.tokens_head++
+	parser.tokens_parsed++
+	parser.token_available = false
+
+	if token.typ == yaml_STREAM_END_TOKEN {
+		parser.stream_end_produced = true
+	}
+	return true
+}
+
+// yaml_parser_set_scanner_error records a scanner-level error on the parser
+// (context, problem, and their marks) and returns false so callers can
+// propagate the failure directly.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+	parser.error = yaml_SCANNER_ERROR
+	parser.problem = problem
+	parser.problem_mark = parser.mark
+	parser.context = context
+	parser.context_mark = context_mark
+	return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+// trace is a debugging helper: it prints "+++" with args on entry and
+// returns a closure that prints "---" with the same args, intended for use
+// as `defer trace(...)()`.
+func trace(args ...interface{}) func() {
+	enter := append([]interface{}{"+++"}, args...)
+	fmt.Println(enter...)
+	exit := append([]interface{}{"---"}, args...)
+	return func() { fmt.Println(exit...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser. Returns false on a scanner error.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+	// While we need more tokens to fetch, do it.
+	for {
+		// [Go] The comment parsing logic requires a lookahead of two tokens
+		// so that foot comments may be parsed in time of associating them
+		// with the tokens that are parsed before them, and also for line
+		// comments to be transformed into head comments in some edge cases.
+		if parser.tokens_head < len(parser.tokens)-2 {
+			// If a potential simple key is at the head position, we need to fetch
+			// the next token to disambiguate it.
+			head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+			if !ok {
+				// No pending simple key at the head: enough tokens queued.
+				break
+			} else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+				return false
+			} else if !valid {
+				break
+			}
+		}
+		// Fetch the next token.
+		if !yaml_parser_fetch_next_token(parser) {
+			return false
+		}
+	}
+
+	parser.token_available = true
+	return true
+}
+
+// The dispatcher for token fetchers: inspects the next input character and
+// delegates to the matching yaml_parser_fetch_* routine. Returns false on
+// any scanner error.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) {
+	// Ensure that the buffer is initialized.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check if we just started scanning. Fetch STREAM-START then.
+	if !parser.stream_start_produced {
+		return yaml_parser_fetch_stream_start(parser)
+	}
+
+	scan_mark := parser.mark
+
+	// Eat whitespaces and comments until we reach the next token.
+	if !yaml_parser_scan_to_next_token(parser) {
+		return false
+	}
+
+	// [Go] While unrolling indents, transform the head comments of prior
+	// indentation levels observed after scan_start into foot comments at
+	// the respective indexes.
+
+	// Check the indentation level against the current column.
+	if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) {
+		return false
+	}
+
+	// Ensure that the buffer contains at least 4 characters. 4 is the length
+	// of the longest indicators ('--- ' and '... ').
+	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+		return false
+	}
+
+	// Is it the end of the stream?
+	if is_z(parser.buffer, parser.buffer_pos) {
+		return yaml_parser_fetch_stream_end(parser)
+	}
+
+	// Is it a directive?
+	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+		return yaml_parser_fetch_directive(parser)
+	}
+
+	buf := parser.buffer
+	pos := parser.buffer_pos
+
+	// Is it the document start indicator?
+	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+	}
+
+	// Is it the document end indicator?
+	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+	}
+
+	comment_mark := parser.mark
+	if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') {
+		// Associate any following comments with the prior token.
+		comment_mark = parser.tokens[len(parser.tokens)-1].start_mark
+	}
+	// [Go] After the token is fetched by one of the branches below, pick up
+	// any trailing line comment and attach it at comment_mark. The deferred
+	// call sees the named result ok, so a comment-scan failure can still
+	// flip the function's return value.
+	defer func() {
+		if !ok {
+			return
+		}
+		if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN {
+			// Sequence indicators alone have no line comments. It becomes
+			// a head comment for whatever follows.
+			return
+		}
+		if !yaml_parser_scan_line_comment(parser, comment_mark) {
+			ok = false
+			return
+		}
+	}()
+
+	// Is it the flow sequence start indicator?
+	if buf[pos] == '[' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+	}
+
+	// Is it the flow mapping start indicator?
+	if parser.buffer[parser.buffer_pos] == '{' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+	}
+
+	// Is it the flow sequence end indicator?
+	if parser.buffer[parser.buffer_pos] == ']' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_SEQUENCE_END_TOKEN)
+	}
+
+	// Is it the flow mapping end indicator?
+	if parser.buffer[parser.buffer_pos] == '}' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_MAPPING_END_TOKEN)
+	}
+
+	// Is it the flow entry indicator?
+	if parser.buffer[parser.buffer_pos] == ',' {
+		return yaml_parser_fetch_flow_entry(parser)
+	}
+
+	// Is it the block entry indicator?
+	if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+		return yaml_parser_fetch_block_entry(parser)
+	}
+
+	// Is it the key indicator?
+	if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_key(parser)
+	}
+
+	// Is it the value indicator?
+	if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_value(parser)
+	}
+
+	// Is it an alias?
+	if parser.buffer[parser.buffer_pos] == '*' {
+		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+	}
+
+	// Is it an anchor?
+	if parser.buffer[parser.buffer_pos] == '&' {
+		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+	}
+
+	// Is it a tag?
+	if parser.buffer[parser.buffer_pos] == '!' {
+		return yaml_parser_fetch_tag(parser)
+	}
+
+	// Is it a literal scalar?
+	if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, true)
+	}
+
+	// Is it a folded scalar?
+	if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, false)
+	}
+
+	// Is it a single-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '\'' {
+		return yaml_parser_fetch_flow_scalar(parser, true)
+	}
+
+	// Is it a double-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '"' {
+		return yaml_parser_fetch_flow_scalar(parser, false)
+	}
+
+	// Is it a plain scalar?
+	//
+	// A plain scalar may start with any non-blank characters except
+	//
+	//      '-', '?', ':', ',', '[', ']', '{', '}',
+	//      '#', '&', '*', '!', '|', '>', '\'', '\"',
+	//      '%', '@', '`'.
+	//
+	// In the block context (and, for the '-' indicator, in the flow context
+	// too), it may also start with the characters
+	//
+	//      '-', '?', ':'
+	//
+	// if it is followed by a non-space character.
+	//
+	// The last rule is more restrictive than the specification requires.
+	// [Go] TODO Make this logic more reasonable.
+	//switch parser.buffer[parser.buffer_pos] {
+	//case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+	//}
+	if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+		parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+		parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+		parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+		parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+		parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+		(parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+		(parser.flow_level == 0 &&
+			(parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+			!is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_plain_scalar(parser)
+	}
+
+	// If we don't determine the token type so far, it is an error.
+	return yaml_parser_set_scanner_error(parser,
+		"while scanning for the next token", parser.mark,
+		"found character that cannot start any token")
+}
+
+// yaml_simple_key_is_valid reports whether the saved simple key can still
+// become a key: it must be on the current line and within 1024 characters
+// of the current position. valid is the answer; ok is false only when an
+// expired key was required, which is a scanner error.
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+	if !simple_key.possible {
+		return false, true
+	}
+
+	// The 1.2 specification says:
+	//
+	//     "If the ? indicator is omitted, parsing needs to see past the
+	//     implicit key to recognize it as such. To limit the amount of
+	//     lookahead required, the “:” indicator must appear at most 1024
+	//     Unicode characters beyond the start of the key. In addition, the key
+	//     is restricted to a single line."
+	//
+	if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+		// Check if the potential simple key to be removed is required.
+		if simple_key.required {
+			return false, yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", simple_key.mark,
+				"could not find expected ':'")
+		}
+		simple_key.possible = false
+		return false, true
+	}
+	return true, true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed. The saved key records the token number it would become, so
+// yaml_parser_fetch_value can later insert a KEY token at the right spot.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		// Drop any previously saved key at this flow level first.
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+// Fails with a scanner error if the key being removed was required.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level, guarding against pathological
+// deeply-nested input exhausting memory.
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+// Fails with a scanner error once max_flow_level is exceeded.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// yaml_parser_decrease_flow_level pops one flow level, discarding the
+// simple-key slot that belonged to it. A no-op at the top level. Always
+// returns true.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level == 0 {
+		return true
+	}
+	parser.flow_level--
+	top := len(parser.simple_keys) - 1
+	delete(parser.simple_keys_by_tok, parser.simple_keys[top].token_number)
+	parser.simple_keys = parser.simple_keys[:top]
+	return true
+}
+
+// max_indents limits the indents stack size, guarding against pathological
+// deeply-indented input exhausting memory.
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new
+// indentation level if the current column is greater than the indentation
+// level. In this case, append or insert the specified token into the token
+// queue (number is the absolute token number for an insert, or -1 to
+// append).
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			// Convert the absolute token number to a queue-relative index.
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	block_mark := scan_mark
+	block_mark.index--
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+
+		// [Go] Reposition the end token before potential following
+		//      foot comments of parent blocks. For that, search
+		//      backwards for recent comments that were at the same
+		//      indent as the block that is ending now.
+		stop_index := block_mark.index
+		for i := len(parser.comments) - 1; i >= 0; i-- {
+			comment := &parser.comments[i]
+
+			if comment.end_mark.index < stop_index {
+				// Don't go back beyond the start of the comment/whitespace scan, unless column < 0.
+				// If requested indent column is < 0, then the document is over and everything else
+				// is a foot anyway.
+				break
+			}
+			if comment.start_mark.column == parser.indent+1 {
+				// This is a good match. But maybe there's a former comment
+				// at that same indent level, so keep searching.
+				block_mark = comment.start_mark
+			}
+
+			// While the end of the former comment matches with
+			// the start of the following one, we know there's
+			// nothing in between and scanning is still safe.
+			stop_index = comment.scan_mark.index
+		}
+
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: block_mark,
+			end_mark:   block_mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+// Runs exactly once, before any other token is fetched.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	parser.simple_keys_by_tok = make(map[int]int)
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+// Unrolls all remaining indentation levels (emitting BLOCK-ENDs) first.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. Directives start at
+// column 0, so all open block collections are closed first.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token for a '---' or '...'
+// marker at column 0.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	// The indicator is exactly three characters ('---' or '...').
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token for a '[' or
+// '{' indicator.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token for a ']' or '}'
+// indicator.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token for a ',' indicator inside a flow
+// collection.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token for a '-' indicator, opening a
+// BLOCK-SEQUENCE-START first if this entry starts a new sequence.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report about it because the Parser
+		// is able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token for an explicit '?' indicator, opening a
+// BLOCK-MAPPING-START first if this key starts a new mapping.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token for a ':' indicator. If a valid simple key was
+// saved earlier, a KEY token is inserted retroactively at the key's queue
+// position before the VALUE token is appended.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		// token_number is absolute; convert to a queue-relative index.
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+		delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the VALUE token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_VALUE_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the ALIAS or ANCHOR token (typ selects which) for a '*' or '&'
+// indicator.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// An anchor or an alias could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow an anchor or an alias.
+	parser.simple_key_allowed = false
+
+	// Create the ALIAS or ANCHOR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_anchor(parser, &token, typ) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the TAG token for a '!' indicator.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+	// A tag could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a tag.
+	parser.simple_key_allowed = false
+
+	// Create the TAG token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_tag(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ scan_mark := parser.mark
+
+ // Until the next token is not found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if we just had a line comment under a sequence entry that
+ // looks more like a header to the following content. Similar to this:
+ //
+ // - # The comment
+ // - Some data
+ //
+ // If so, transform the line comment to a head comment and reposition.
+ if len(parser.comments) > 0 && len(parser.tokens) > 1 {
+ tokenA := parser.tokens[len(parser.tokens)-2]
+ tokenB := parser.tokens[len(parser.tokens)-1]
+ comment := &parser.comments[len(parser.comments)-1]
+ if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
+ // If it was in the prior line, reposition so it becomes a
+ // header of the follow up token. Otherwise, keep it in place
+ // so it becomes a header of the former.
+ comment.head = comment.line
+ comment.line = nil
+ if comment.start_mark.line == parser.mark.line-1 {
+ comment.token_mark = parser.mark
+ }
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ if !yaml_parser_scan_comments(parser, scan_mark) {
+ return false
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
//
// Scope:
//
//	%YAML    1.1    # a comment \n
//	^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
//	%TAG    !yaml!  tag:yaml.org,2002:  \n
//	^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
//
// On success *token holds a VERSION-DIRECTIVE or TAG-DIRECTIVE token and
// the parser is positioned past the directive's line break.
func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
	// Eat '%'.
	start_mark := parser.mark
	skip(parser)

	// Scan the directive name.
	var name []byte
	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
		return false
	}

	// Is it a YAML directive?
	if bytes.Equal(name, []byte("YAML")) {
		// Scan the VERSION directive value.
		var major, minor int8
		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
			return false
		}
		end_mark := parser.mark

		// Create a VERSION-DIRECTIVE token.
		*token = yaml_token_t{
			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
			start_mark: start_mark,
			end_mark:   end_mark,
			major:      major,
			minor:      minor,
		}

		// Is it a TAG directive?
	} else if bytes.Equal(name, []byte("TAG")) {
		// Scan the TAG directive value.
		var handle, prefix []byte
		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
			return false
		}
		end_mark := parser.mark

		// Create a TAG-DIRECTIVE token.
		*token = yaml_token_t{
			typ:        yaml_TAG_DIRECTIVE_TOKEN,
			start_mark: start_mark,
			end_mark:   end_mark,
			value:      handle,
			prefix:     prefix,
		}

		// Unknown directive.
	} else {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "found unknown directive name")
		return false
	}

	// Eat the rest of the line including any comments.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	if parser.buffer[parser.buffer_pos] == '#' {
		// [Go] Discard this inline comment for the time being.
		// The commented-out call below would instead attach it to the token.
		//if !yaml_parser_scan_line_comment(parser, start_mark) {
		//	return false
		//}
		for !is_breakz(parser.buffer, parser.buffer_pos) {
			skip(parser)
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}
	}

	// Check if we are at the end of the line.
	if !is_breakz(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "did not find expected comment or line break")
		return false
	}

	// Eat a line break.
	if is_break(parser.buffer, parser.buffer_pos) {
		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
			return false
		}
		skip_line(parser)
	}

	return true
}
+
+// Scan the directive name.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
// Scan the directive name.
//
// Scope:
//
//	%YAML   1.1     # a comment \n
//	 ^^^^
//	%TAG    !yaml!  tag:yaml.org,2002: \n
//	 ^^^
//
// On success *name holds the alphanumeric name following '%' (e.g. "YAML"),
// and the parser is positioned on the blank character after it.
func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
	// Consume the directive name.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	var s []byte
	for is_alpha(parser.buffer, parser.buffer_pos) {
		s = read(parser, s)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Check if the name is empty.
	if len(s) == 0 {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "could not find expected directive name")
		return false
	}

	// Check for a blank character after the name.
	if !is_blankz(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "found unexpected non-alphabetical character")
		return false
	}
	*name = s
	return true
}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^^^^^^
// Scan the value of VERSION-DIRECTIVE.
//
// Scope:
//
//	%YAML   1.1     # a comment \n
//	     ^^^^^^
//
// On success *major and *minor hold the two dot-separated numbers.
func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
	// Eat whitespaces.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Consume the major version number.
	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
		return false
	}

	// Eat '.' separating major and minor.
	if parser.buffer[parser.buffer_pos] != '.' {
		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
			start_mark, "did not find expected digit or '.' character")
	}

	skip(parser)

	// Consume the minor version number.
	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
		return false
	}
	return true
}
+
// max_number_length caps each component of a %YAML version number at two
// digits (enforced in yaml_parser_scan_version_directive_number).
const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
// Scan the version number of VERSION-DIRECTIVE.
//
// Scope:
//
//	%YAML   1.1     # a comment \n
//	        ^
//	%YAML   1.1     # a comment \n
//	          ^
//
// On success *number holds the decimal value of up to max_number_length
// digits; an empty or overlong run of digits is a scanner error.
func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {

	// Repeat while the next character is digit.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	var value, length int8
	for is_digit(parser.buffer, parser.buffer_pos) {
		// Check if the number is too long.
		length++
		if length > max_number_length {
			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
				start_mark, "found extremely long version number")
		}
		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Check if the number was present.
	if length == 0 {
		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
			start_mark, "did not find expected version number")
	}
	*number = value
	return true
}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
// Scan the value of a TAG-DIRECTIVE token.
//
// Scope:
//
//	%TAG    !yaml!  tag:yaml.org,2002:  \n
//	    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
//
// On success *handle holds the tag handle (e.g. "!yaml!") and *prefix the
// tag prefix it expands to; both must be separated/terminated by blanks.
func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
	var handle_value, prefix_value []byte

	// Eat whitespaces.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Scan a handle.
	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
		return false
	}

	// Expect a whitespace.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	if !is_blank(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
			start_mark, "did not find expected whitespace")
		return false
	}

	// Eat whitespaces.
	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Scan a prefix.
	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
		return false
	}

	// Expect a whitespace or line break.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	if !is_blankz(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
			start_mark, "did not find expected whitespace or line break")
		return false
	}

	// Publish the results only after both parts scanned successfully.
	*handle = handle_value
	*prefix = prefix_value
	return true
}
+
// Scan an ALIAS ('*name') or ANCHOR ('&name') token.
//
// typ selects the token type produced (and the error-message wording).
// On success *token holds the token whose value is the anchor/alias name.
func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
	var s []byte

	// Eat the indicator character ('*' or '&').
	start_mark := parser.mark
	skip(parser)

	// Consume the value.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	for is_alpha(parser.buffer, parser.buffer_pos) {
		s = read(parser, s)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	end_mark := parser.mark

	/*
	 * Check if length of the anchor is greater than 0 and it is followed by
	 * a whitespace character or one of the indicators:
	 *
	 *      '?', ':', ',', ']', '}', '%', '@', '`'.
	 */

	if len(s) == 0 ||
		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
			parser.buffer[parser.buffer_pos] == '`') {
		// Pick the error context matching the token being scanned.
		context := "while scanning an alias"
		if typ == yaml_ANCHOR_TOKEN {
			context = "while scanning an anchor"
		}
		yaml_parser_set_scanner_error(parser, context, start_mark,
			"did not find expected alphabetic or numeric character")
		return false
	}

	// Create a token.
	*token = yaml_token_t{
		typ:        typ,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
	}

	return true
}
+
+/*
+ * Scan a TAG token.
+ */
+
/*
 * Scan a TAG token.
 *
 * Handles the three surface forms: verbatim '!<uri>', shorthand
 * '!handle!suffix', and primary '!suffix' (including the bare '!' tag).
 * On success *token holds a TAG token with value=handle, suffix=suffix.
 */

func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
	var handle, suffix []byte

	start_mark := parser.mark

	// Check if the tag is in the canonical form.
	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
		return false
	}

	if parser.buffer[parser.buffer_pos+1] == '<' {
		// Verbatim tag: keep the handle as ''.

		// Eat '!<'
		skip(parser)
		skip(parser)

		// Consume the tag value.
		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
			return false
		}

		// Check for '>' and eat it.
		if parser.buffer[parser.buffer_pos] != '>' {
			yaml_parser_set_scanner_error(parser, "while scanning a tag",
				start_mark, "did not find the expected '>'")
			return false
		}

		skip(parser)
	} else {
		// The tag has either the '!suffix' or the '!handle!suffix' form.

		// First, try to scan a handle.
		// (scan_tag_handle only succeeds with a leading '!', so handle is
		// non-empty here.)
		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
			return false
		}

		// Check if it is, indeed, handle.
		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
			// Scan the suffix now.
			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
				return false
			}
		} else {
			// It wasn't a handle after all.  Scan the rest of the tag.
			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
				return false
			}

			// Set the handle to '!'.
			handle = []byte{'!'}

			// A special case: the '!' tag.  Set the handle to '' and the
			// suffix to '!'.
			if len(suffix) == 0 {
				handle, suffix = suffix, handle
			}
		}
	}

	// Check the character which ends the tag.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	if !is_blankz(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a tag",
			start_mark, "did not find expected whitespace or line break")
		return false
	}

	end_mark := parser.mark

	// Create a token.
	*token = yaml_token_t{
		typ:        yaml_TAG_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      handle,
		suffix:     suffix,
	}
	return true
}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be a part of URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] TODO Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode an URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ if !yaml_parser_scan_line_comment(parser, start_mark) {
+ return false
+ }
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // Is is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan a plain scalar.
+//
+// A plain (unquoted) scalar ends at a comment, at a document indicator
+// ("---" or "...") starting in column 0, at a ':' followed by a blank, at a
+// flow indicator (",?[]{}") when inside a flow collection, or when the text
+// dedents below the required indentation. Interior line breaks are folded:
+// a single break becomes one space, additional breaks are kept as-is.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ // The scalar content must be indented past the enclosing block's indent.
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ // A single '\n' folds to one space; otherwise the collected
+ // breaks are appended literally.
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for tab characters that abuse indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ // Blanks after a line break are indentation and are skipped
+ // rather than collected.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ // A plain scalar that ended on line breaks allows a simple key to follow.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
+
+// yaml_parser_scan_line_comment scans an inline ("# ...") comment that
+// follows the token at token_mark on the same line, storing the collected
+// text in parser.comments. It returns true immediately when a line break
+// has already been consumed (parser.newlines > 0). Lookahead for the '#'
+// is capped at 512 bytes.
+func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool {
+ if parser.newlines > 0 {
+ return true
+ }
+
+ var start_mark yaml_mark_t
+ var text []byte
+
+ for peek := 0; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ if parser.buffer[parser.buffer_pos+peek] == '#' {
+ // Found a comment: consume the rest of the line, collecting text
+ // starting at the '#' position (absolute index "seen").
+ seen := parser.mark.index + peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else if parser.mark.index >= seen {
+ if len(text) == 0 {
+ start_mark = parser.mark
+ }
+ text = read(parser, text)
+ } else {
+ skip(parser)
+ }
+ }
+ }
+ break
+ }
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ token_mark: token_mark,
+ start_mark: start_mark,
+ line: text,
+ })
+ }
+ return true
+}
+
+// yaml_parser_scan_comments scans full-line comments following scan_mark
+// and appends them to parser.comments, classifying each run as either a
+// "foot" comment of the preceding token or a "head" comment of the upcoming
+// one, based on blank-line separation, indentation, and flow context.
+func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool {
+ token := parser.tokens[len(parser.tokens)-1]
+
+ // A flow entry token (',') is positional; attribute comments to the
+ // token before it when possible.
+ if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 {
+ token = parser.tokens[len(parser.tokens)-2]
+ }
+
+ var token_mark = token.start_mark
+ var start_mark yaml_mark_t
+ var next_indent = parser.indent
+ if next_indent < 0 {
+ next_indent = 0
+ }
+
+ var recent_empty = false
+ var first_empty = parser.newlines <= 1
+
+ var line = parser.mark.line
+ var column = parser.mark.column
+
+ var text []byte
+
+ // The foot line is the place where a comment must start to
+ // still be considered as a foot of the prior content.
+ // If there's some content in the currently parsed line, then
+ // the foot is the line below it.
+ var foot_line = -1
+ if scan_mark.line > 0 {
+ foot_line = parser.mark.line - parser.newlines + 1
+ if parser.newlines == 0 && parser.mark.column > 1 {
+ foot_line++
+ }
+ }
+
+ // Lookahead is bounded to 512 bytes per comment line; peek is reset to 0
+ // after each fully consumed line.
+ var peek = 0
+ for ; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ column++
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ c := parser.buffer[parser.buffer_pos+peek]
+ var close_flow = parser.flow_level > 0 && (c == ']' || c == '}')
+ if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) {
+ // Got line break or terminator.
+ if close_flow || !recent_empty {
+ if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) {
+ // This is the first empty line and there were no empty lines before,
+ // so this initial part of the comment is a foot of the prior token
+ // instead of being a head for the following one. Split it up.
+ // Alternatively, this might also be the last comment inside a flow
+ // scope, so it must be a footer.
+ if len(text) > 0 {
+ if start_mark.column-1 < next_indent {
+ // If dedented it's unrelated to the prior token.
+ token_mark = start_mark
+ }
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+ } else {
+ if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 {
+ text = append(text, '\n')
+ }
+ }
+ }
+ if !is_break(parser.buffer, parser.buffer_pos+peek) {
+ break
+ }
+ first_empty = false
+ recent_empty = true
+ column = 0
+ line++
+ continue
+ }
+
+ if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) {
+ // The comment at the different indentation is a foot of the
+ // preceding data rather than a head of the upcoming one.
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+
+ if parser.buffer[parser.buffer_pos+peek] != '#' {
+ break
+ }
+
+ if len(text) == 0 {
+ start_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ } else {
+ text = append(text, '\n')
+ }
+
+ recent_empty = false
+
+ // Consume until after the consumed comment line.
+ seen := parser.mark.index + peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else if parser.mark.index >= seen {
+ text = read(parser, text)
+ } else {
+ skip(parser)
+ }
+ }
+
+ peek = 0
+ column = 0
+ line = parser.mark.line
+ next_indent = parser.indent
+ if next_indent < 0 {
+ next_indent = 0
+ }
+ }
+
+ // Any remaining text is a head comment for the upcoming token.
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: start_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column},
+ head: text,
+ })
+ }
+ return true
+}
diff --git a/vendor/go.yaml.in/yaml/v3/sorter.go b/vendor/go.yaml.in/yaml/v3/sorter.go
new file mode 100644
index 0000000000..9210ece7e9
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/sorter.go
@@ -0,0 +1,134 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+// keyList orders reflect.Values of map keys for deterministic encoding:
+// numbers and bools sort numerically, strings sort in a "natural" order in
+// which embedded runs of digits compare by numeric value (so "a9" < "a10").
+// keyList implements sort.Interface.
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ // Unwrap non-nil interfaces and pointers to compare underlying values.
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ // Mixed or non-comparable kinds fall back to ordering by kind number.
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ // digits tracks whether the last equal rune was a digit, which decides
+ // how a digit/letter mismatch is ordered below.
+ digits := false
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ digits = unicode.IsDigit(ar[i])
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ if digits {
+ return al
+ } else {
+ return bl
+ }
+ }
+ var ai, bi int
+ var an, bn int64
+ // If either side has a '0' here, scan back over the common digit
+ // prefix: a nonzero digit there means both runs continue the same
+ // number, so seed the accumulators equally to keep them comparable.
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
+ // Accumulate the full digit runs, compare by value, then by run
+ // length, then by the first differing rune.
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ // One string is a prefix of the other; the shorter sorts first.
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number/bool
+// and whether it is a number/bool or not.
+// Bools map to 0/1 so that false sorts before true.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must necessarily have the same kind.
+// Panics when the kind is not a numeric or bool kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
diff --git a/vendor/go.yaml.in/yaml/v3/writerc.go b/vendor/go.yaml.in/yaml/v3/writerc.go
new file mode 100644
index 0000000000..266d0b092c
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/writerc.go
@@ -0,0 +1,48 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+// Set the writer error and return false.
+// The problem string is recorded on the emitter for later inspection.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+// Writes emitter.buffer[:emitter.buffer_pos] through the configured
+// write_handler and resets the buffer position. Returns false (with the
+// writer error recorded on the emitter) when the handler fails. Panics if
+// no write handler has been set.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
diff --git a/vendor/go.yaml.in/yaml/v3/yaml.go b/vendor/go.yaml.in/yaml/v3/yaml.go
new file mode 100644
index 0000000000..0b101cd20d
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/yaml.go
@@ -0,0 +1,703 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/yaml/go-yaml
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+ UnmarshalYAML(value *Node) error
+}
+
+// obsoleteUnmarshaler is the legacy (function-callback style) unmarshaler
+// interface. NOTE(review): presumably retained for backward compatibility
+// with yaml.v2-style types — confirm against the decoder implementation.
+type obsoleteUnmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ parser *parser
+ knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+ dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// Decode returns io.EOF when the input stream is exhausted.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ d.knownFields = dec.knownFields
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ // A nil node means no further document is available.
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ defer handleErr(&err)
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(n, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// unmarshal decodes the first document in "in" into "out".
+// NOTE(review): the strict parameter is not referenced anywhere in this
+// function body — confirm whether it is still needed.
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[][,[,]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be excluded if IsZero returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(v))
+ e.finish()
+ // Round-trip: re-parse the emitted text and graft the resulting
+ // document's first content node into n.
+ p := newParser(e.out)
+ p.textless = true
+ defer p.destroy()
+ doc := p.parse()
+ *n = *doc.Content[0]
+ return nil
+}
+
+// SetIndent changes the used indentation used when encoding.
+// It panics when spaces is negative.
+func (e *Encoder) SetIndent(spaces int) {
+ if spaces < 0 {
+ panic("yaml: cannot indent to a negative number of spaces")
+ }
+ e.encoder.indent = spaces
+}
+
+// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
+func (e *Encoder) CompactSeqIndent() {
+ e.encoder.emitter.compact_sequence_indent = true
+}
+
+// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
+func (e *Encoder) DefaultSeqIndent() {
+ e.encoder.emitter.compact_sequence_indent = false
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
+
+// handleErr recovers a yamlError panic (raised via fail or failf) and
+// stores its wrapped error in *err; any other panic value is re-raised.
+// It must be deferred by every exported entry point that can fail.
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+// yamlError wraps an error for transport via panic/recover inside the package.
+type yamlError struct {
+ err error
+}
+
+// fail panics with err wrapped in yamlError, to be caught by handleErr.
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+// failf panics with a formatted "yaml: ..."-prefixed error wrapped in
+// yamlError, to be caught by handleErr.
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+// Kind identifies the kind of a Node: document, sequence, mapping, scalar,
+// or alias.
+type Kind uint32
+
+const (
+ DocumentNode Kind = 1 << iota
+ SequenceNode
+ MappingNode
+ ScalarNode
+ AliasNode
+)
+
+// Style holds flags that customize how a node is rendered when encoded.
+type Style uint32
+
+const (
+ TaggedStyle Style = 1 << iota
+ DoubleQuotedStyle
+ SingleQuotedStyle
+ LiteralStyle
+ FoldedStyle
+ FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+// var person struct {
+// Name string
+// Address yaml.Node
+// }
+// err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+// var person Node
+// err := yaml.Unmarshal(data, &person)
+type Node struct {
+ // Kind defines whether the node is a document, a mapping, a sequence,
+ // a scalar value, or an alias to another node. The specific data type of
+ // scalar nodes may be obtained via the ShortTag and LongTag methods.
+ Kind Kind
+
+ // Style allows customizing the appearance of the node in the tree.
+ Style Style
+
+ // Tag holds the YAML tag defining the data type for the value.
+ // When decoding, this field will always be set to the resolved tag,
+ // even when it wasn't explicitly provided in the YAML content.
+ // When encoding, if this field is unset the value type will be
+ // implied from the node properties, and if it is set, it will only
+ // be serialized into the representation if TaggedStyle is used or
+ // the implicit tag diverges from the provided one.
+ Tag string
+
+ // Value holds the unescaped and unquoted representation of the value.
+ Value string
+
+ // Anchor holds the anchor name for this node, which allows aliases to point to it.
+ Anchor string
+
+ // Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+ Alias *Node
+
+ // Content holds contained nodes for documents, mappings, and sequences.
+ Content []*Node
+
+ // HeadComment holds any comments in the lines preceding the node and
+ // not separated by an empty line.
+ HeadComment string
+
+ // LineComment holds any comments at the end of the line where the node is in.
+ LineComment string
+
+ // FootComment holds any comments following the node and before empty lines.
+ FootComment string
+
+ // Line and Column hold the node position in the decoded YAML text.
+ // These fields are not respected when encoding the node.
+ Line int
+ Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+ return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+ n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+ return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+ if n.indicatedString() {
+ return strTag
+ }
+ if n.Tag == "" || n.Tag == "!" {
+ switch n.Kind {
+ case MappingNode:
+ return mapTag
+ case SequenceNode:
+ return seqTag
+ case AliasNode:
+ if n.Alias != nil {
+ return n.Alias.ShortTag()
+ }
+ case ScalarNode:
+ tag, _ := resolve("", n.Value)
+ return tag
+ case 0:
+ // Special case to make the zero value convenient.
+ if n.IsZero() {
+ return nullTag
+ }
+ }
+ return ""
+ }
+ return shortTag(n.Tag)
+}
+
+// indicatedString reports whether the node is a scalar that must resolve to
+// a string: either explicitly tagged !!str, or untagged but written in a
+// quoted, literal, or folded style.
+func (n *Node) indicatedString() bool {
+ return n.Kind == ScalarNode &&
+ (shortTag(n.Tag) == strTag ||
+ (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
+func (n *Node) SetString(s string) {
+ n.Kind = ScalarNode
+ if utf8.ValidString(s) {
+ n.Value = s
+ n.Tag = strTag
+ } else {
+ // Invalid UTF-8 is stored base64-encoded under the !!binary tag.
+ n.Value = encodeBase64(s)
+ n.Tag = binaryTag
+ }
+ // Multi-line values render best as literal block scalars.
+ if strings.Contains(n.Value, "\n") {
+ n.Style = LiteralStyle
+ }
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+
+ // InlineUnmarshalers holds indexes to inlined fields that
+ // contain unmarshaler values.
+ InlineUnmarshalers [][]int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+var unmarshalerType reflect.Type
+
+func init() {
+ var v Unmarshaler
+ unmarshalerType = reflect.ValueOf(&v).Elem().Type()
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ inlineUnmarshalers := [][]int(nil)
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st))
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct, reflect.Ptr:
+ ftype := field.Type
+ for ftype.Kind() == reflect.Ptr {
+ ftype = ftype.Elem()
+ }
+ if ftype.Kind() != reflect.Struct {
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ if reflect.PtrTo(ftype).Implements(unmarshalerType) {
+ inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
+ } else {
+ sinfo, err := getStructInfo(ftype)
+ if err != nil {
+ return nil, err
+ }
+ for _, index := range sinfo.InlineUnmarshalers {
+ inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ }
+ default:
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ InlineUnmarshalers: inlineUnmarshalers,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
diff --git a/vendor/go.yaml.in/yaml/v3/yamlh.go b/vendor/go.yaml.in/yaml/v3/yamlh.go
new file mode 100644
index 0000000000..f59aa40f64
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/yamlh.go
@@ -0,0 +1,811 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
+
+ yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+ yaml_TAIL_COMMENT_EVENT
+)
+
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+ yaml_TAIL_COMMENT_EVENT: "tail comment",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for YAML_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write not more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data A pointer to an application data specified by
+//
+// yaml_parser_set_input().
+//
+// [out] buffer The buffer to write the data from the source.
+// [in] size The size of the buffer.
+// [out] size_read The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ newlines int // The number of line breaks since last non-break/non-blank character
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Comments
+
+ head_comment []byte // The current head comments
+ line_comment []byte // The current line comments
+ foot_comment []byte // The current foot comments
+ tail_comment []byte // Foot comment that happens at the end of a block.
+ stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
+
+ comments []yaml_comment_t // The folded comments for all parsed tokens
+ comments_head int
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+ simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+type yaml_comment_t struct {
+ scan_mark yaml_mark_t // Position where scanning for comments started
+ token_mark yaml_mark_t // Position after which tokens will be associated with this comment
+ start_mark yaml_mark_t // Position of '#' comment mark
+ end_mark yaml_mark_t // Position where comment terminated
+
+ head []byte
+ line []byte
+ foot []byte
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write @a size bytes of the
+// @a buffer to the output.
+//
+// @param[in,out] data A pointer to an application data specified by
+//
+// yaml_emitter_set_output().
+//
+// @param[in] buffer The buffer with bytes to be written.
+// @param[in] size The size of the buffer.
+//
+// @returns On success, the handler should return @c 1. If the handler failed,
+// the returned value should be @c 0.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ compact_sequence_indent bool // Is '- ' is considered part of the indentation for sequence elements?
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ space_above bool // Is there's an empty line above?
+ foot_indent int // The indent used to write the foot comment above, or -1 if none.
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expessed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ key_line_comment []byte
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/go.yaml.in/yaml/v3/yamlprivateh.go b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
new file mode 100644
index 0000000000..dea1ba9610
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return (
+ // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return (
+ // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return (
+ // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+
+}
diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
index 7dd2638e88..769af387e2 100644
--- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
+++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
@@ -29,7 +29,7 @@ loop:
MOVD $NUM_ROUNDS, R21
VLD1 (R11), [V30.S4, V31.S4]
- // load contants
+ // load constants
// VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4]
WORD $0x4D60E940
diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go
index 21ca3b2ee4..048faef3a5 100644
--- a/vendor/golang.org/x/crypto/curve25519/curve25519.go
+++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go
@@ -3,11 +3,14 @@
// license that can be found in the LICENSE file.
// Package curve25519 provides an implementation of the X25519 function, which
-// performs scalar multiplication on the elliptic curve known as Curve25519.
-// See RFC 7748.
+// performs scalar multiplication on the elliptic curve known as Curve25519
+// according to [RFC 7748].
//
-// This package is a wrapper for the X25519 implementation
-// in the crypto/ecdh package.
+// The curve25519 package is a wrapper for the X25519 implementation in the
+// crypto/ecdh package. It is [frozen] and is not accepting new features.
+//
+// [RFC 7748]: https://datatracker.ietf.org/doc/html/rfc7748
+// [frozen]: https://go.dev/wiki/Frozen
package curve25519
import "crypto/ecdh"
@@ -36,7 +39,7 @@ func ScalarBaseMult(dst, scalar *[32]byte) {
curve := ecdh.X25519()
priv, err := curve.NewPrivateKey(scalar[:])
if err != nil {
- panic("curve25519: internal error: scalarBaseMult was not 32 bytes")
+ panic("curve25519: " + err.Error())
}
copy(dst[:], priv.PublicKey().Bytes())
}
diff --git a/vendor/golang.org/x/crypto/nacl/sign/sign.go b/vendor/golang.org/x/crypto/nacl/sign/sign.go
index 109c08bb95..1cf2c4be2c 100644
--- a/vendor/golang.org/x/crypto/nacl/sign/sign.go
+++ b/vendor/golang.org/x/crypto/nacl/sign/sign.go
@@ -4,20 +4,15 @@
// Package sign signs small messages using public-key cryptography.
//
-// Sign uses Ed25519 to sign messages. The length of messages is not hidden.
-// Messages should be small because:
-// 1. The whole message needs to be held in memory to be processed.
-// 2. Using large messages pressures implementations on small machines to process
-// plaintext without verifying the signature. This is very dangerous, and this API
-// discourages it, but a protocol that uses excessive message sizes might present
-// some implementations with no other choice.
-// 3. Performance may be improved by working with messages that fit into data caches.
-// Thus large amounts of data should be chunked so that each message is small.
+// This package is interoperable with [libsodium], as well as [TweetNaCl].
//
-// This package is not interoperable with the current release of NaCl
-// (https://nacl.cr.yp.to/sign.html), which does not support Ed25519 yet. However,
-// it is compatible with the NaCl fork libsodium (https://www.libsodium.org), as well
-// as TweetNaCl (https://tweetnacl.cr.yp.to/).
+// The sign package is essentially a wrapper for the Ed25519 signature
+// algorithm (implemented by crypto/ed25519). It is [frozen] and is not accepting
+// new features.
+//
+// [libsodium]: https://libsodium.gitbook.io/doc/public-key_cryptography/public-key_signatures
+// [TweetNaCl]: https://tweetnacl.cr.yp.to/
+// [frozen]: https://go.dev/wiki/Frozen
package sign
import (
diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
index fa1a919079..490cb633ce 100644
--- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
+++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
@@ -53,7 +53,7 @@ func (c *Config) hash() crypto.Hash {
func (c *Config) encodedCount() uint8 {
if c == nil || c.S2KCount == 0 {
- return 96 // The common case. Correspoding to 65536
+ return 96 // The common case. Corresponding to 65536
}
i := c.S2KCount
diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go
index 37525e1a18..b357e18b0a 100644
--- a/vendor/golang.org/x/crypto/ssh/agent/client.go
+++ b/vendor/golang.org/x/crypto/ssh/agent/client.go
@@ -430,8 +430,9 @@ func (c *client) List() ([]*Key, error) {
return keys, nil
case *failureAgentMsg:
return nil, errors.New("agent: failed to list keys")
+ default:
+ return nil, fmt.Errorf("agent: failed to list keys, unexpected message type %T", msg)
}
- panic("unreachable")
}
// Sign has the agent sign the data using a protocol 2 key as defined
@@ -462,8 +463,9 @@ func (c *client) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFl
return &sig, nil
case *failureAgentMsg:
return nil, errors.New("agent: failed to sign challenge")
+ default:
+ return nil, fmt.Errorf("agent: failed to sign challenge, unexpected message type %T", msg)
}
- panic("unreachable")
}
// unmarshal parses an agent message in packet, returning the parsed
diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go
index c1b4361087..d129875510 100644
--- a/vendor/golang.org/x/crypto/ssh/agent/keyring.go
+++ b/vendor/golang.org/x/crypto/ssh/agent/keyring.go
@@ -112,7 +112,7 @@ func (r *keyring) Unlock(passphrase []byte) error {
}
// expireKeysLocked removes expired keys from the keyring. If a key was added
-// with a lifetimesecs contraint and seconds >= lifetimesecs seconds have
+// with a lifetimesecs constraint and seconds >= lifetimesecs seconds have
// elapsed, it is removed. The caller *must* be holding the keyring mutex.
func (r *keyring) expireKeysLocked() {
for _, k := range r.keys {
diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go
index 6a5b582aa9..7554ed57a9 100644
--- a/vendor/golang.org/x/crypto/ssh/cipher.go
+++ b/vendor/golang.org/x/crypto/ssh/cipher.go
@@ -8,6 +8,7 @@ import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
+ "crypto/fips140"
"crypto/rc4"
"crypto/subtle"
"encoding/binary"
@@ -15,6 +16,7 @@ import (
"fmt"
"hash"
"io"
+ "slices"
"golang.org/x/crypto/chacha20"
"golang.org/x/crypto/internal/poly1305"
@@ -93,41 +95,41 @@ func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream,
}
// cipherModes documents properties of supported ciphers. Ciphers not included
-// are not supported and will not be negotiated, even if explicitly requested in
-// ClientConfig.Crypto.Ciphers.
-var cipherModes = map[string]*cipherMode{
- // Ciphers from RFC 4344, which introduced many CTR-based ciphers. Algorithms
- // are defined in the order specified in the RFC.
- CipherAES128CTR: {16, aes.BlockSize, streamCipherMode(0, newAESCTR)},
- CipherAES192CTR: {24, aes.BlockSize, streamCipherMode(0, newAESCTR)},
- CipherAES256CTR: {32, aes.BlockSize, streamCipherMode(0, newAESCTR)},
-
- // Ciphers from RFC 4345, which introduces security-improved arcfour ciphers.
- // They are defined in the order specified in the RFC.
- InsecureCipherRC4128: {16, 0, streamCipherMode(1536, newRC4)},
- InsecureCipherRC4256: {32, 0, streamCipherMode(1536, newRC4)},
-
- // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
- // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
- // RC4) has problems with weak keys, and should be used with caution."
- // RFC 4345 introduces improved versions of Arcfour.
- InsecureCipherRC4: {16, 0, streamCipherMode(0, newRC4)},
-
- // AEAD ciphers
- CipherAES128GCM: {16, 12, newGCMCipher},
- CipherAES256GCM: {32, 12, newGCMCipher},
- CipherChaCha20Poly1305: {64, 0, newChaCha20Cipher},
-
+// are not supported and will not be negotiated, even if explicitly configured.
+// When FIPS mode is enabled, only FIPS-approved algorithms are included.
+var cipherModes = map[string]*cipherMode{}
+
+func init() {
+ cipherModes[CipherAES128CTR] = &cipherMode{16, aes.BlockSize, streamCipherMode(0, newAESCTR)}
+ cipherModes[CipherAES192CTR] = &cipherMode{24, aes.BlockSize, streamCipherMode(0, newAESCTR)}
+ cipherModes[CipherAES256CTR] = &cipherMode{32, aes.BlockSize, streamCipherMode(0, newAESCTR)}
+ // Use of GCM with arbitrary IVs is not allowed in FIPS 140-only mode,
+ // we'll wire it up to NewGCMForSSH in Go 1.26.
+ //
+ // For now it means we'll work with fips140=on but not fips140=only.
+ cipherModes[CipherAES128GCM] = &cipherMode{16, 12, newGCMCipher}
+ cipherModes[CipherAES256GCM] = &cipherMode{32, 12, newGCMCipher}
+
+ if fips140.Enabled() {
+ defaultCiphers = slices.DeleteFunc(defaultCiphers, func(algo string) bool {
+ _, ok := cipherModes[algo]
+ return !ok
+ })
+ return
+ }
+
+ cipherModes[CipherChaCha20Poly1305] = &cipherMode{64, 0, newChaCha20Cipher}
+ // Insecure ciphers not included in the default configuration.
+ cipherModes[InsecureCipherRC4128] = &cipherMode{16, 0, streamCipherMode(1536, newRC4)}
+ cipherModes[InsecureCipherRC4256] = &cipherMode{32, 0, streamCipherMode(1536, newRC4)}
+ cipherModes[InsecureCipherRC4] = &cipherMode{16, 0, streamCipherMode(0, newRC4)}
// CBC mode is insecure and so is not included in the default config.
// (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely
// needed, it's possible to specify a custom Config to enable it.
// You should expect that an active attacker can recover plaintext if
// you do.
- InsecureCipherAES128CBC: {16, aes.BlockSize, newAESCBCCipher},
-
- // 3des-cbc is insecure and is not included in the default
- // config.
- InsecureCipherTripleDESCBC: {24, des.BlockSize, newTripleDESCBCCipher},
+ cipherModes[InsecureCipherAES128CBC] = &cipherMode{16, aes.BlockSize, newAESCBCCipher}
+ cipherModes[InsecureCipherTripleDESCBC] = &cipherMode{24, des.BlockSize, newTripleDESCBCCipher}
}
// prefixLen is the length of the packet prefix that contains the packet length
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go
index c12818fdc5..3127e49903 100644
--- a/vendor/golang.org/x/crypto/ssh/client_auth.go
+++ b/vendor/golang.org/x/crypto/ssh/client_auth.go
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"io"
+ "slices"
"strings"
)
@@ -83,7 +84,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
// success
return nil
} else if ok == authFailure {
- if m := auth.method(); !contains(tried, m) {
+ if m := auth.method(); !slices.Contains(tried, m) {
tried = append(tried, m)
}
}
@@ -97,7 +98,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
findNext:
for _, a := range config.Auth {
candidateMethod := a.method()
- if contains(tried, candidateMethod) {
+ if slices.Contains(tried, candidateMethod) {
continue
}
for _, meth := range methods {
@@ -117,15 +118,6 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried)
}
-func contains(list []string, e string) bool {
- for _, s := range list {
- if s == e {
- return true
- }
- }
- return false
-}
-
// An AuthMethod represents an instance of an RFC 4252 authentication method.
type AuthMethod interface {
// auth authenticates user over transport t.
@@ -255,7 +247,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA
// Fallback to use if there is no "server-sig-algs" extension or a
// common algorithm cannot be found. We use the public key format if the
// MultiAlgorithmSigner supports it, otherwise we return an error.
- if !contains(as.Algorithms(), underlyingAlgo(keyFormat)) {
+ if !slices.Contains(as.Algorithms(), underlyingAlgo(keyFormat)) {
return "", fmt.Errorf("ssh: no common public key signature algorithm, server only supports %q for key type %q, signer only supports %v",
underlyingAlgo(keyFormat), keyFormat, as.Algorithms())
}
@@ -284,7 +276,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA
// Filter algorithms based on those supported by MultiAlgorithmSigner.
var keyAlgos []string
for _, algo := range algorithmsForKeyFormat(keyFormat) {
- if contains(as.Algorithms(), underlyingAlgo(algo)) {
+ if slices.Contains(as.Algorithms(), underlyingAlgo(algo)) {
keyAlgos = append(keyAlgos, algo)
}
}
@@ -334,7 +326,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
// the key try to use the obtained algorithm as if "server-sig-algs" had
// not been implemented if supported from the algorithm signer.
if !ok && idx < origSignersLen && isRSACert(algo) && algo != CertAlgoRSAv01 {
- if contains(as.Algorithms(), KeyAlgoRSA) {
+ if slices.Contains(as.Algorithms(), KeyAlgoRSA) {
// We retry using the compat algorithm after all signers have
// been tried normally.
signers = append(signers, &multiAlgorithmSigner{
@@ -385,7 +377,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
// contain the "publickey" method, do not attempt to authenticate with any
// other keys. According to RFC 4252 Section 7, the latter can occur when
// additional authentication methods are required.
- if success == authSuccess || !contains(methods, cb.method()) {
+ if success == authSuccess || !slices.Contains(methods, cb.method()) {
return success, methods, err
}
}
@@ -434,7 +426,7 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
// servers send the key type instead. OpenSSH allows any algorithm
// that matches the public key, so we do the same.
// https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709
- if !contains(algorithmsForKeyFormat(key.Type()), msg.Algo) {
+ if !slices.Contains(algorithmsForKeyFormat(key.Type()), msg.Algo) {
return false, nil
}
if !bytes.Equal(msg.PubKey, pubKey) {
diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go
index f2ec0896c2..2e44e9c9ec 100644
--- a/vendor/golang.org/x/crypto/ssh/common.go
+++ b/vendor/golang.org/x/crypto/ssh/common.go
@@ -6,6 +6,7 @@ package ssh
import (
"crypto"
+ "crypto/fips140"
"crypto/rand"
"fmt"
"io"
@@ -83,6 +84,7 @@ var (
// supportedKexAlgos specifies key-exchange algorithms implemented by this
// package in preference order, excluding those with security issues.
supportedKexAlgos = []string{
+ KeyExchangeMLKEM768X25519,
KeyExchangeCurve25519,
KeyExchangeECDHP256,
KeyExchangeECDHP384,
@@ -94,6 +96,7 @@ var (
// defaultKexAlgos specifies the default preference for key-exchange
// algorithms in preference order.
defaultKexAlgos = []string{
+ KeyExchangeMLKEM768X25519,
KeyExchangeCurve25519,
KeyExchangeECDHP256,
KeyExchangeECDHP384,
@@ -254,6 +257,40 @@ type Algorithms struct {
PublicKeyAuths []string
}
+func init() {
+ if fips140.Enabled() {
+ defaultHostKeyAlgos = slices.DeleteFunc(defaultHostKeyAlgos, func(algo string) bool {
+ _, err := hashFunc(underlyingAlgo(algo))
+ return err != nil
+ })
+ defaultPubKeyAuthAlgos = slices.DeleteFunc(defaultPubKeyAuthAlgos, func(algo string) bool {
+ _, err := hashFunc(underlyingAlgo(algo))
+ return err != nil
+ })
+ }
+}
+
+func hashFunc(format string) (crypto.Hash, error) {
+ switch format {
+ case KeyAlgoRSASHA256, KeyAlgoECDSA256, KeyAlgoSKED25519, KeyAlgoSKECDSA256:
+ return crypto.SHA256, nil
+ case KeyAlgoECDSA384:
+ return crypto.SHA384, nil
+ case KeyAlgoRSASHA512, KeyAlgoECDSA521:
+ return crypto.SHA512, nil
+ case KeyAlgoED25519:
+ // KeyAlgoED25519 doesn't pre-hash.
+ return 0, nil
+ case KeyAlgoRSA, InsecureKeyAlgoDSA:
+ if fips140.Enabled() {
+ return 0, fmt.Errorf("ssh: hash algorithm for format %q not allowed in FIPS 140 mode", format)
+ }
+ return crypto.SHA1, nil
+ default:
+ return 0, fmt.Errorf("ssh: hash algorithm for format %q not mapped", format)
+ }
+}
+
// SupportedAlgorithms returns algorithms currently implemented by this package,
// excluding those with security issues, which are returned by
// InsecureAlgorithms. The algorithms listed here are in preference order.
@@ -281,21 +318,6 @@ func InsecureAlgorithms() Algorithms {
var supportedCompressions = []string{compressionNone}
-// hashFuncs keeps the mapping of supported signature algorithms to their
-// respective hashes needed for signing and verification.
-var hashFuncs = map[string]crypto.Hash{
- KeyAlgoRSA: crypto.SHA1,
- KeyAlgoRSASHA256: crypto.SHA256,
- KeyAlgoRSASHA512: crypto.SHA512,
- InsecureKeyAlgoDSA: crypto.SHA1,
- KeyAlgoECDSA256: crypto.SHA256,
- KeyAlgoECDSA384: crypto.SHA384,
- KeyAlgoECDSA521: crypto.SHA512,
- // KeyAlgoED25519 doesn't pre-hash.
- KeyAlgoSKECDSA256: crypto.SHA256,
- KeyAlgoSKED25519: crypto.SHA256,
-}
-
// algorithmsForKeyFormat returns the supported signature algorithms for a given
// public key format (PublicKey.Type), in order of preference. See RFC 8332,
// Section 2. See also the note in sendKexInit on backwards compatibility.
@@ -310,11 +332,40 @@ func algorithmsForKeyFormat(keyFormat string) []string {
}
}
+// keyFormatForAlgorithm returns the key format corresponding to the given
+// signature algorithm. It returns an empty string if the signature algorithm is
+// invalid or unsupported.
+func keyFormatForAlgorithm(sigAlgo string) string {
+ switch sigAlgo {
+ case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512:
+ return KeyAlgoRSA
+ case CertAlgoRSAv01, CertAlgoRSASHA256v01, CertAlgoRSASHA512v01:
+ return CertAlgoRSAv01
+ case KeyAlgoED25519,
+ KeyAlgoSKED25519,
+ KeyAlgoSKECDSA256,
+ KeyAlgoECDSA256,
+ KeyAlgoECDSA384,
+ KeyAlgoECDSA521,
+ InsecureKeyAlgoDSA,
+ InsecureCertAlgoDSAv01,
+ CertAlgoECDSA256v01,
+ CertAlgoECDSA384v01,
+ CertAlgoECDSA521v01,
+ CertAlgoSKECDSA256v01,
+ CertAlgoED25519v01,
+ CertAlgoSKED25519v01:
+ return sigAlgo
+ default:
+ return ""
+ }
+}
+
// isRSA returns whether algo is a supported RSA algorithm, including certificate
// algorithms.
func isRSA(algo string) bool {
algos := algorithmsForKeyFormat(KeyAlgoRSA)
- return contains(algos, underlyingAlgo(algo))
+ return slices.Contains(algos, underlyingAlgo(algo))
}
func isRSACert(algo string) bool {
@@ -513,7 +564,7 @@ func (c *Config) SetDefaults() {
if kexAlgoMap[k] != nil {
// Ignore the KEX if we have no kexAlgoMap definition.
kexs = append(kexs, k)
- if k == KeyExchangeCurve25519 && !contains(c.KeyExchanges, keyExchangeCurve25519LibSSH) {
+ if k == KeyExchangeCurve25519 && !slices.Contains(c.KeyExchanges, keyExchangeCurve25519LibSSH) {
kexs = append(kexs, keyExchangeCurve25519LibSSH)
}
}
diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go
index 04ccce3461..5b4de9effc 100644
--- a/vendor/golang.org/x/crypto/ssh/doc.go
+++ b/vendor/golang.org/x/crypto/ssh/doc.go
@@ -17,8 +17,18 @@ References:
[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
[SSH-CERTS]: https://datatracker.ietf.org/doc/html/draft-miller-ssh-cert-01
+ [FIPS 140-3 mode]: https://go.dev/doc/security/fips140
This package does not fall under the stability promise of the Go language itself,
so its API may be changed when pressing needs arise.
+
+# FIPS 140-3 mode
+
+When the program is in [FIPS 140-3 mode], this package behaves as if only SP
+800-140C and SP 800-140D approved cipher suites, signature algorithms,
+certificate public key types and sizes, and key exchange and derivation
+algorithms were implemented. Others are silently ignored and not negotiated, or
+rejected. This set may depend on the algorithms supported by the FIPS 140-3 Go
+Cryptographic Module selected with GOFIPS140, and may change across Go versions.
*/
package ssh
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
index a90bfe331c..4be3cbb6de 100644
--- a/vendor/golang.org/x/crypto/ssh/handshake.go
+++ b/vendor/golang.org/x/crypto/ssh/handshake.go
@@ -10,6 +10,7 @@ import (
"io"
"log"
"net"
+ "slices"
"strings"
"sync"
)
@@ -527,7 +528,7 @@ func (t *handshakeTransport) sendKexInit() error {
switch s := k.(type) {
case MultiAlgorithmSigner:
for _, algo := range algorithmsForKeyFormat(keyFormat) {
- if contains(s.Algorithms(), underlyingAlgo(algo)) {
+ if slices.Contains(s.Algorithms(), underlyingAlgo(algo)) {
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo)
}
}
@@ -679,7 +680,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
return err
}
- if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) {
+ if t.sessionID == nil && ((isClient && slices.Contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && slices.Contains(clientInit.KexAlgos, kexStrictClient))) {
t.strictMode = true
if err := t.conn.setStrictMode(); err != nil {
return err
@@ -736,7 +737,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
// On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO
// message with the server-sig-algs extension if the client supports it. See
// RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9.
- if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") {
+ if !isClient && firstKeyExchange && slices.Contains(clientInit.KexAlgos, "ext-info-c") {
supportedPubKeyAuthAlgosList := strings.Join(t.publicKeyAuthAlgorithms, ",")
extInfo := &extInfoMsg{
NumExtensions: 2,
@@ -790,7 +791,7 @@ func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, a
func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner {
for _, k := range hostKeys {
if s, ok := k.(MultiAlgorithmSigner); ok {
- if !contains(s.Algorithms(), underlyingAlgo(algo)) {
+ if !slices.Contains(s.Algorithms(), underlyingAlgo(algo)) {
continue
}
}
diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go
index cf388a92aa..5f7fdd8514 100644
--- a/vendor/golang.org/x/crypto/ssh/kex.go
+++ b/vendor/golang.org/x/crypto/ssh/kex.go
@@ -8,13 +8,14 @@ import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
+ "crypto/fips140"
"crypto/rand"
- "crypto/subtle"
"encoding/binary"
"errors"
"fmt"
"io"
"math/big"
+ "slices"
"golang.org/x/crypto/curve25519"
)
@@ -396,9 +397,27 @@ func ecHash(curve elliptic.Curve) crypto.Hash {
return crypto.SHA512
}
+// kexAlgoMap defines the supported KEXs. KEXs not included are not supported
+// and will not be negotiated, even if explicitly configured. When FIPS mode is
+// enabled, only FIPS-approved algorithms are included.
var kexAlgoMap = map[string]kexAlgorithm{}
func init() {
+ // mlkem768x25519-sha256 we'll work with fips140=on but not fips140=only
+ // until Go 1.26.
+ kexAlgoMap[KeyExchangeMLKEM768X25519] = &mlkem768WithCurve25519sha256{}
+ kexAlgoMap[KeyExchangeECDHP521] = &ecdh{elliptic.P521()}
+ kexAlgoMap[KeyExchangeECDHP384] = &ecdh{elliptic.P384()}
+ kexAlgoMap[KeyExchangeECDHP256] = &ecdh{elliptic.P256()}
+
+ if fips140.Enabled() {
+ defaultKexAlgos = slices.DeleteFunc(defaultKexAlgos, func(algo string) bool {
+ _, ok := kexAlgoMap[algo]
+ return !ok
+ })
+ return
+ }
+
p, _ := new(big.Int).SetString(oakleyGroup2, 16)
kexAlgoMap[InsecureKeyExchangeDH1SHA1] = &dhGroup{
g: new(big.Int).SetInt64(2),
@@ -432,9 +451,6 @@ func init() {
hashFunc: crypto.SHA512,
}
- kexAlgoMap[KeyExchangeECDHP521] = &ecdh{elliptic.P521()}
- kexAlgoMap[KeyExchangeECDHP384] = &ecdh{elliptic.P384()}
- kexAlgoMap[KeyExchangeECDHP256] = &ecdh{elliptic.P256()}
kexAlgoMap[KeyExchangeCurve25519] = &curve25519sha256{}
kexAlgoMap[keyExchangeCurve25519LibSSH] = &curve25519sha256{}
kexAlgoMap[InsecureKeyExchangeDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1}
@@ -454,15 +470,17 @@ func (kp *curve25519KeyPair) generate(rand io.Reader) error {
if _, err := io.ReadFull(rand, kp.priv[:]); err != nil {
return err
}
- curve25519.ScalarBaseMult(&kp.pub, &kp.priv)
+ p, err := curve25519.X25519(kp.priv[:], curve25519.Basepoint)
+ if err != nil {
+ return fmt.Errorf("curve25519: %w", err)
+ }
+ if len(p) != 32 {
+ return fmt.Errorf("curve25519: internal error: X25519 returned %d bytes, expected 32", len(p))
+ }
+ copy(kp.pub[:], p)
return nil
}
-// curve25519Zeros is just an array of 32 zero bytes so that we have something
-// convenient to compare against in order to reject curve25519 points with the
-// wrong order.
-var curve25519Zeros [32]byte
-
func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
var kp curve25519KeyPair
if err := kp.generate(rand); err != nil {
@@ -485,11 +503,9 @@ func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handsh
return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
}
- var servPub, secret [32]byte
- copy(servPub[:], reply.EphemeralPubKey)
- curve25519.ScalarMult(&secret, &kp.priv, &servPub)
- if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
- return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
+ secret, err := curve25519.X25519(kp.priv[:], reply.EphemeralPubKey)
+ if err != nil {
+ return nil, fmt.Errorf("ssh: peer's curve25519 public value is not valid: %w", err)
}
h := crypto.SHA256.New()
@@ -531,11 +547,9 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh
return nil, err
}
- var clientPub, secret [32]byte
- copy(clientPub[:], kexInit.ClientPubKey)
- curve25519.ScalarMult(&secret, &kp.priv, &clientPub)
- if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
- return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
+ secret, err := curve25519.X25519(kp.priv[:], kexInit.ClientPubKey)
+ if err != nil {
+ return nil, fmt.Errorf("ssh: peer's curve25519 public value is not valid: %w", err)
}
hostKeyBytes := priv.PublicKey().Marshal()
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
index a28c0de503..a035956fcc 100644
--- a/vendor/golang.org/x/crypto/ssh/keys.go
+++ b/vendor/golang.org/x/crypto/ssh/keys.go
@@ -27,6 +27,7 @@ import (
"fmt"
"io"
"math/big"
+ "slices"
"strings"
"golang.org/x/crypto/ssh/internal/bcrypt_pbkdf"
@@ -89,6 +90,11 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err
}
return cert, nil, nil
}
+ if keyFormat := keyFormatForAlgorithm(algo); keyFormat != "" {
+ return nil, nil, fmt.Errorf("ssh: signature algorithm %q isn't a key format; key is malformed and should be re-encoded with type %q",
+ algo, keyFormat)
+ }
+
return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo)
}
@@ -191,9 +197,10 @@ func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey
return "", nil, nil, "", nil, io.EOF
}
-// ParseAuthorizedKey parses a public key from an authorized_keys
-// file used in OpenSSH according to the sshd(8) manual page.
+// ParseAuthorizedKey parses a public key from an authorized_keys file used in
+// OpenSSH according to the sshd(8) manual page. Invalid lines are ignored.
func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) {
+ var lastErr error
for len(in) > 0 {
end := bytes.IndexByte(in, '\n')
if end != -1 {
@@ -222,6 +229,8 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str
if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
return out, comment, options, rest, nil
+ } else {
+ lastErr = err
}
// No key type recognised. Maybe there's an options field at
@@ -264,12 +273,18 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str
if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
options = candidateOptions
return out, comment, options, rest, nil
+ } else {
+ lastErr = err
}
in = rest
continue
}
+ if lastErr != nil {
+ return nil, "", nil, nil, fmt.Errorf("ssh: no key found; last parsing error for ignored line: %w", lastErr)
+ }
+
return nil, "", nil, nil, errors.New("ssh: no key found")
}
@@ -395,11 +410,11 @@ func NewSignerWithAlgorithms(signer AlgorithmSigner, algorithms []string) (Multi
}
for _, algo := range algorithms {
- if !contains(supportedAlgos, algo) {
+ if !slices.Contains(supportedAlgos, algo) {
return nil, fmt.Errorf("ssh: algorithm %q is not supported for key type %q",
algo, signer.PublicKey().Type())
}
- if !contains(signerAlgos, algo) {
+ if !slices.Contains(signerAlgos, algo) {
return nil, fmt.Errorf("ssh: algorithm %q is restricted for the provided signer", algo)
}
}
@@ -486,10 +501,13 @@ func (r *rsaPublicKey) Marshal() []byte {
func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
supportedAlgos := algorithmsForKeyFormat(r.Type())
- if !contains(supportedAlgos, sig.Format) {
+ if !slices.Contains(supportedAlgos, sig.Format) {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
}
- hash := hashFuncs[sig.Format]
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
h := hash.New()
h.Write(data)
digest := h.Sum(nil)
@@ -606,7 +624,11 @@ func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
- h := hashFuncs[sig.Format].New()
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
h.Write(data)
digest := h.Sum(nil)
@@ -651,7 +673,11 @@ func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm
return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
}
- h := hashFuncs[k.PublicKey().Type()].New()
+ hash, err := hashFunc(k.PublicKey().Type())
+ if err != nil {
+ return nil, err
+ }
+ h := hash.New()
h.Write(data)
digest := h.Sum(nil)
r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
@@ -801,8 +827,11 @@ func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
-
- h := hashFuncs[sig.Format].New()
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
h.Write(data)
digest := h.Sum(nil)
@@ -905,8 +934,11 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
-
- h := hashFuncs[sig.Format].New()
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)
@@ -1009,7 +1041,11 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {
return fmt.Errorf("invalid size %d for Ed25519 public key", l)
}
- h := hashFuncs[sig.Format].New()
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)
@@ -1112,11 +1148,14 @@ func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm
algorithm = s.pubKey.Type()
}
- if !contains(s.Algorithms(), algorithm) {
+ if !slices.Contains(s.Algorithms(), algorithm) {
return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type())
}
- hashFunc := hashFuncs[algorithm]
+ hashFunc, err := hashFunc(algorithm)
+ if err != nil {
+ return nil, err
+ }
var digest []byte
if hashFunc != 0 {
h := hashFunc.New()
diff --git a/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go
index c022e411f0..1ebd7e6da2 100644
--- a/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go
+++ b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go
@@ -421,20 +421,26 @@ func New(files ...string) (ssh.HostKeyCallback, error) {
return certChecker.CheckHostKey, nil
}
-// Normalize normalizes an address into the form used in known_hosts
+// Normalize normalizes an address into the form used in known_hosts. Supports
+// IPv4, hostnames, bracketed IPv6. Any other non-standard formats are returned
+// with minimal transformation.
func Normalize(address string) string {
+ const defaultSSHPort = "22"
+
host, port, err := net.SplitHostPort(address)
if err != nil {
host = address
- port = "22"
+ port = defaultSSHPort
+ }
+
+ if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+ host = host[1 : len(host)-1]
}
- entry := host
- if port != "22" {
- entry = "[" + entry + "]:" + port
- } else if strings.Contains(host, ":") && !strings.HasPrefix(host, "[") {
- entry = "[" + entry + "]"
+
+ if port == defaultSSHPort {
+ return host
}
- return entry
+ return "[" + host + "]:" + port
}
// Line returns a line to add append to the known_hosts files.
diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go
index de2639d57f..87d626fbbf 100644
--- a/vendor/golang.org/x/crypto/ssh/mac.go
+++ b/vendor/golang.org/x/crypto/ssh/mac.go
@@ -7,11 +7,13 @@ package ssh
// Message authentication support
import (
+ "crypto/fips140"
"crypto/hmac"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"hash"
+ "slices"
)
type macMode struct {
@@ -46,23 +48,37 @@ func (t truncatingMAC) Size() int {
func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
-var macModes = map[string]*macMode{
- HMACSHA512ETM: {64, true, func(key []byte) hash.Hash {
+// macModes defines the supported MACs. MACs not included are not supported
+// and will not be negotiated, even if explicitly configured. When FIPS mode is
+// enabled, only FIPS-approved algorithms are included.
+var macModes = map[string]*macMode{}
+
+func init() {
+ macModes[HMACSHA512ETM] = &macMode{64, true, func(key []byte) hash.Hash {
return hmac.New(sha512.New, key)
- }},
- HMACSHA256ETM: {32, true, func(key []byte) hash.Hash {
+ }}
+ macModes[HMACSHA256ETM] = &macMode{32, true, func(key []byte) hash.Hash {
return hmac.New(sha256.New, key)
- }},
- HMACSHA512: {64, false, func(key []byte) hash.Hash {
+ }}
+ macModes[HMACSHA512] = &macMode{64, false, func(key []byte) hash.Hash {
return hmac.New(sha512.New, key)
- }},
- HMACSHA256: {32, false, func(key []byte) hash.Hash {
+ }}
+ macModes[HMACSHA256] = &macMode{32, false, func(key []byte) hash.Hash {
return hmac.New(sha256.New, key)
- }},
- HMACSHA1: {20, false, func(key []byte) hash.Hash {
+ }}
+
+ if fips140.Enabled() {
+ defaultMACs = slices.DeleteFunc(defaultMACs, func(algo string) bool {
+ _, ok := macModes[algo]
+ return !ok
+ })
+ return
+ }
+
+ macModes[HMACSHA1] = &macMode{20, false, func(key []byte) hash.Hash {
return hmac.New(sha1.New, key)
- }},
- InsecureHMACSHA196: {20, false, func(key []byte) hash.Hash {
+ }}
+ macModes[InsecureHMACSHA196] = &macMode{20, false, func(key []byte) hash.Hash {
return truncatingMAC{12, hmac.New(sha1.New, key)}
- }},
+ }}
}
diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go
index 251b9d06a3..ab22c3d38d 100644
--- a/vendor/golang.org/x/crypto/ssh/messages.go
+++ b/vendor/golang.org/x/crypto/ssh/messages.go
@@ -792,7 +792,7 @@ func marshalString(to []byte, s []byte) []byte {
return to[len(s):]
}
-var bigIntType = reflect.TypeOf((*big.Int)(nil))
+var bigIntType = reflect.TypeFor[*big.Int]()
// Decode a packet into its corresponding message.
func decode(packet []byte) (interface{}, error) {
diff --git a/vendor/golang.org/x/crypto/ssh/mlkem.go b/vendor/golang.org/x/crypto/ssh/mlkem.go
index 657e1079d4..ddc0ed1fc0 100644
--- a/vendor/golang.org/x/crypto/ssh/mlkem.go
+++ b/vendor/golang.org/x/crypto/ssh/mlkem.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.24
-
package ssh
import (
@@ -13,23 +11,10 @@ import (
"errors"
"fmt"
"io"
- "runtime"
- "slices"
"golang.org/x/crypto/curve25519"
)
-func init() {
- // After Go 1.24rc1 mlkem swapped the order of return values of Encapsulate.
- // See #70950.
- if runtime.Version() == "go1.24rc1" {
- return
- }
- supportedKexAlgos = slices.Insert(supportedKexAlgos, 0, KeyExchangeMLKEM768X25519)
- defaultKexAlgos = slices.Insert(defaultKexAlgos, 0, KeyExchangeMLKEM768X25519)
- kexAlgoMap[KeyExchangeMLKEM768X25519] = &mlkem768WithCurve25519sha256{}
-}
-
// mlkem768WithCurve25519sha256 implements the hybrid ML-KEM768 with
// curve25519-sha256 key exchange method, as described by
// draft-kampanakis-curdle-ssh-pq-ke-05 section 2.3.3.
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
index 98679ba5b6..064dcbaf5a 100644
--- a/vendor/golang.org/x/crypto/ssh/server.go
+++ b/vendor/golang.org/x/crypto/ssh/server.go
@@ -10,6 +10,7 @@ import (
"fmt"
"io"
"net"
+ "slices"
"strings"
)
@@ -43,6 +44,9 @@ type Permissions struct {
// pass data from the authentication callbacks to the server
// application layer.
Extensions map[string]string
+
+ // ExtraData allows to store user defined data.
+ ExtraData map[any]any
}
type GSSAPIWithMICConfig struct {
@@ -126,6 +130,21 @@ type ServerConfig struct {
// Permissions.Extensions entry.
PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
+ // VerifiedPublicKeyCallback, if non-nil, is called after a client
+ // successfully confirms having control over a key that was previously
+ // approved by PublicKeyCallback. The permissions object passed to the
+ // callback is the one returned by PublicKeyCallback for the given public
+ // key and its ownership is transferred to the callback. The returned
+ // Permissions object can be the same object, optionally modified, or a
+ // completely new object. If VerifiedPublicKeyCallback is non-nil,
+ // PublicKeyCallback is not allowed to return a PartialSuccessError, which
+ // can instead be returned by VerifiedPublicKeyCallback.
+ //
+ // VerifiedPublicKeyCallback does not affect which authentication methods
+ // are included in the list of methods that can be attempted by the client.
+ VerifiedPublicKeyCallback func(conn ConnMetadata, key PublicKey, permissions *Permissions,
+ signatureAlgorithm string) (*Permissions, error)
+
// KeyboardInteractiveCallback, if non-nil, is called when
// keyboard-interactive authentication is selected (RFC
// 4256). The client object's Challenge function should be
@@ -246,7 +265,7 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha
fullConf.PublicKeyAuthAlgorithms = defaultPubKeyAuthAlgos
} else {
for _, algo := range fullConf.PublicKeyAuthAlgorithms {
- if !contains(SupportedAlgorithms().PublicKeyAuths, algo) && !contains(InsecureAlgorithms().PublicKeyAuths, algo) {
+ if !slices.Contains(SupportedAlgorithms().PublicKeyAuths, algo) && !slices.Contains(InsecureAlgorithms().PublicKeyAuths, algo) {
c.Close()
return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo)
}
@@ -631,7 +650,7 @@ userAuthLoop:
return nil, parseError(msgUserAuthRequest)
}
algo := string(algoBytes)
- if !contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) {
+ if !slices.Contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) {
authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
break
}
@@ -652,6 +671,9 @@ userAuthLoop:
candidate.pubKeyData = pubKeyData
candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey)
_, isPartialSuccessError := candidate.result.(*PartialSuccessError)
+ if isPartialSuccessError && config.VerifiedPublicKeyCallback != nil {
+ return nil, errors.New("ssh: invalid library usage: PublicKeyCallback must not return partial success when VerifiedPublicKeyCallback is defined")
+ }
if (candidate.result == nil || isPartialSuccessError) &&
candidate.perms != nil &&
@@ -695,7 +717,7 @@ userAuthLoop:
// ssh-rsa-cert-v01@openssh.com algorithm with ssh-rsa public
// key type. The algorithm and public key type must be
// consistent: both must be certificate algorithms, or neither.
- if !contains(algorithmsForKeyFormat(pubKey.Type()), algo) {
+ if !slices.Contains(algorithmsForKeyFormat(pubKey.Type()), algo) {
authErr = fmt.Errorf("ssh: public key type %q not compatible with selected algorithm %q",
pubKey.Type(), algo)
break
@@ -705,7 +727,7 @@ userAuthLoop:
// algorithm name that corresponds to algo with
// sig.Format. This is usually the same, but
// for certs, the names differ.
- if !contains(config.PublicKeyAuthAlgorithms, sig.Format) {
+ if !slices.Contains(config.PublicKeyAuthAlgorithms, sig.Format) {
authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format)
break
}
@@ -722,6 +744,12 @@ userAuthLoop:
authErr = candidate.result
perms = candidate.perms
+ if authErr == nil && config.VerifiedPublicKeyCallback != nil {
+ // Only call VerifiedPublicKeyCallback after the key has been accepted
+ // and successfully verified. If authErr is non-nil, the key is not
+ // considered verified and the callback must not run.
+ perms, authErr = config.VerifiedPublicKeyCallback(s, pubKey, perms, algo)
+ }
}
case "gssapi-with-mic":
if authConfig.GSSAPIWithMICConfig == nil {
diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go
index 663619845c..fa3dd6a429 100644
--- a/vendor/golang.org/x/crypto/ssh/transport.go
+++ b/vendor/golang.org/x/crypto/ssh/transport.go
@@ -8,6 +8,7 @@ import (
"bufio"
"bytes"
"errors"
+ "fmt"
"io"
"log"
)
@@ -254,6 +255,9 @@ var (
// (to setup server->client keys) or clientKeys (for client->server keys).
func newPacketCipher(d direction, algs DirectionAlgorithms, kex *kexResult) (packetCipher, error) {
cipherMode := cipherModes[algs.Cipher]
+ if cipherMode == nil {
+ return nil, fmt.Errorf("ssh: unsupported cipher %v", algs.Cipher)
+ }
iv := make([]byte, cipherMode.ivSize)
key := make([]byte, cipherMode.keySize)
diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE
new file mode 100644
index 0000000000..2a7cf70da6
--- /dev/null
+++ b/vendor/golang.org/x/mod/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/mod/PATENTS b/vendor/golang.org/x/mod/PATENTS
new file mode 100644
index 0000000000..733099041f
--- /dev/null
+++ b/vendor/golang.org/x/mod/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go
new file mode 100644
index 0000000000..628f8fd687
--- /dev/null
+++ b/vendor/golang.org/x/mod/semver/semver.go
@@ -0,0 +1,407 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package semver implements comparison of semantic version strings.
+// In this package, semantic version strings must begin with a leading "v",
+// as in "v1.0.0".
+//
+// The general form of a semantic version string accepted by this package is
+//
+// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
+//
+// where square brackets indicate optional parts of the syntax;
+// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
+// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
+// using only alphanumeric characters and hyphens; and
+// all-numeric PRERELEASE identifiers must not have leading zeros.
+//
+// This package follows Semantic Versioning 2.0.0 (see semver.org)
+// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
+// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
+// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
+package semver
+
+import (
+ "slices"
+ "strings"
+)
+
+// parsed returns the parsed form of a semantic version string.
+type parsed struct {
+ major string
+ minor string
+ patch string
+ short string
+ prerelease string
+ build string
+}
+
+// IsValid reports whether v is a valid semantic version string.
+func IsValid(v string) bool {
+ _, ok := parse(v)
+ return ok
+}
+
+// Canonical returns the canonical formatting of the semantic version v.
+// It fills in any missing .MINOR or .PATCH and discards build metadata.
+// Two semantic versions compare equal only if their canonical formattings
+// are identical strings.
+// The canonical invalid semantic version is the empty string.
+func Canonical(v string) string {
+ p, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ if p.build != "" {
+ return v[:len(v)-len(p.build)]
+ }
+ if p.short != "" {
+ return v + p.short
+ }
+ return v
+}
+
+// Major returns the major version prefix of the semantic version v.
+// For example, Major("v2.1.0") == "v2".
+// If v is an invalid semantic version string, Major returns the empty string.
+func Major(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return v[:1+len(pv.major)]
+}
+
+// MajorMinor returns the major.minor version prefix of the semantic version v.
+// For example, MajorMinor("v2.1.0") == "v2.1".
+// If v is an invalid semantic version string, MajorMinor returns the empty string.
+func MajorMinor(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ i := 1 + len(pv.major)
+ if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
+ return v[:j]
+ }
+ return v[:i] + "." + pv.minor
+}
+
+// Prerelease returns the prerelease suffix of the semantic version v.
+// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
+// If v is an invalid semantic version string, Prerelease returns the empty string.
+func Prerelease(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return pv.prerelease
+}
+
+// Build returns the build suffix of the semantic version v.
+// For example, Build("v2.1.0+meta") == "+meta".
+// If v is an invalid semantic version string, Build returns the empty string.
+func Build(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return pv.build
+}
+
+// Compare returns an integer comparing two versions according to
+// semantic version precedence.
+// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
+//
+// An invalid semantic version string is considered less than a valid one.
+// All invalid semantic version strings compare equal to each other.
+func Compare(v, w string) int {
+ pv, ok1 := parse(v)
+ pw, ok2 := parse(w)
+ if !ok1 && !ok2 {
+ return 0
+ }
+ if !ok1 {
+ return -1
+ }
+ if !ok2 {
+ return +1
+ }
+ if c := compareInt(pv.major, pw.major); c != 0 {
+ return c
+ }
+ if c := compareInt(pv.minor, pw.minor); c != 0 {
+ return c
+ }
+ if c := compareInt(pv.patch, pw.patch); c != 0 {
+ return c
+ }
+ return comparePrerelease(pv.prerelease, pw.prerelease)
+}
+
+// Max canonicalizes its arguments and then returns the version string
+// that compares greater.
+//
+// Deprecated: use [Compare] instead. In most cases, returning a canonicalized
+// version is not expected or desired.
+func Max(v, w string) string {
+ v = Canonical(v)
+ w = Canonical(w)
+ if Compare(v, w) > 0 {
+ return v
+ }
+ return w
+}
+
+// ByVersion implements [sort.Interface] for sorting semantic version strings.
+type ByVersion []string
+
+func (vs ByVersion) Len() int { return len(vs) }
+func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] }
+func (vs ByVersion) Less(i, j int) bool { return compareVersion(vs[i], vs[j]) < 0 }
+
+// Sort sorts a list of semantic version strings using [Compare] and falls back
+// to use [strings.Compare] if both versions are considered equal.
+func Sort(list []string) {
+ slices.SortFunc(list, compareVersion)
+}
+
+func compareVersion(a, b string) int {
+ cmp := Compare(a, b)
+ if cmp != 0 {
+ return cmp
+ }
+ return strings.Compare(a, b)
+}
+
+func parse(v string) (p parsed, ok bool) {
+ if v == "" || v[0] != 'v' {
+ return
+ }
+ p.major, v, ok = parseInt(v[1:])
+ if !ok {
+ return
+ }
+ if v == "" {
+ p.minor = "0"
+ p.patch = "0"
+ p.short = ".0.0"
+ return
+ }
+ if v[0] != '.' {
+ ok = false
+ return
+ }
+ p.minor, v, ok = parseInt(v[1:])
+ if !ok {
+ return
+ }
+ if v == "" {
+ p.patch = "0"
+ p.short = ".0"
+ return
+ }
+ if v[0] != '.' {
+ ok = false
+ return
+ }
+ p.patch, v, ok = parseInt(v[1:])
+ if !ok {
+ return
+ }
+ if len(v) > 0 && v[0] == '-' {
+ p.prerelease, v, ok = parsePrerelease(v)
+ if !ok {
+ return
+ }
+ }
+ if len(v) > 0 && v[0] == '+' {
+ p.build, v, ok = parseBuild(v)
+ if !ok {
+ return
+ }
+ }
+ if v != "" {
+ ok = false
+ return
+ }
+ ok = true
+ return
+}
+
+func parseInt(v string) (t, rest string, ok bool) {
+ if v == "" {
+ return
+ }
+ if v[0] < '0' || '9' < v[0] {
+ return
+ }
+ i := 1
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ if v[0] == '0' && i != 1 {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func parsePrerelease(v string) (t, rest string, ok bool) {
+ // "A pre-release version MAY be denoted by appending a hyphen and
+ // a series of dot separated identifiers immediately following the patch version.
+ // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
+ // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
+ if v == "" || v[0] != '-' {
+ return
+ }
+ i := 1
+ start := 1
+ for i < len(v) && v[i] != '+' {
+ if !isIdentChar(v[i]) && v[i] != '.' {
+ return
+ }
+ if v[i] == '.' {
+ if start == i || isBadNum(v[start:i]) {
+ return
+ }
+ start = i + 1
+ }
+ i++
+ }
+ if start == i || isBadNum(v[start:i]) {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func parseBuild(v string) (t, rest string, ok bool) {
+ if v == "" || v[0] != '+' {
+ return
+ }
+ i := 1
+ start := 1
+ for i < len(v) {
+ if !isIdentChar(v[i]) && v[i] != '.' {
+ return
+ }
+ if v[i] == '.' {
+ if start == i {
+ return
+ }
+ start = i + 1
+ }
+ i++
+ }
+ if start == i {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func isIdentChar(c byte) bool {
+ return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
+}
+
+func isBadNum(v string) bool {
+ i := 0
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ return i == len(v) && i > 1 && v[0] == '0'
+}
+
+func isNum(v string) bool {
+ i := 0
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ return i == len(v)
+}
+
+func compareInt(x, y string) int {
+ if x == y {
+ return 0
+ }
+ if len(x) < len(y) {
+ return -1
+ }
+ if len(x) > len(y) {
+ return +1
+ }
+ if x < y {
+ return -1
+ } else {
+ return +1
+ }
+}
+
+func comparePrerelease(x, y string) int {
+ // "When major, minor, and patch are equal, a pre-release version has
+ // lower precedence than a normal version.
+ // Example: 1.0.0-alpha < 1.0.0.
+ // Precedence for two pre-release versions with the same major, minor,
+ // and patch version MUST be determined by comparing each dot separated
+ // identifier from left to right until a difference is found as follows:
+ // identifiers consisting of only digits are compared numerically and
+ // identifiers with letters or hyphens are compared lexically in ASCII
+ // sort order. Numeric identifiers always have lower precedence than
+ // non-numeric identifiers. A larger set of pre-release fields has a
+ // higher precedence than a smaller set, if all of the preceding
+ // identifiers are equal.
+ // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
+ // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
+ if x == y {
+ return 0
+ }
+ if x == "" {
+ return +1
+ }
+ if y == "" {
+ return -1
+ }
+ for x != "" && y != "" {
+ x = x[1:] // skip - or .
+ y = y[1:] // skip - or .
+ var dx, dy string
+ dx, x = nextIdent(x)
+ dy, y = nextIdent(y)
+ if dx != dy {
+ ix := isNum(dx)
+ iy := isNum(dy)
+ if ix != iy {
+ if ix {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ if ix {
+ if len(dx) < len(dy) {
+ return -1
+ }
+ if len(dx) > len(dy) {
+ return +1
+ }
+ }
+ if dx < dy {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ }
+ if x == "" {
+ return -1
+ } else {
+ return +1
+ }
+}
+
+func nextIdent(x string) (dx, rest string) {
+ i := 0
+ for i < len(x) && x[i] != '.' {
+ i++
+ }
+ return x[:i], x[i:]
+}
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
index db1c95fab1..24cea68820 100644
--- a/vendor/golang.org/x/net/context/context.go
+++ b/vendor/golang.org/x/net/context/context.go
@@ -2,44 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package context defines the Context type, which carries deadlines,
-// cancellation signals, and other request-scoped values across API boundaries
-// and between processes.
-// As of Go 1.7 this package is available in the standard library under the
-// name [context], and migrating to it can be done automatically with [go fix].
-//
-// Incoming requests to a server should create a [Context], and outgoing
-// calls to servers should accept a Context. The chain of function
-// calls between them must propagate the Context, optionally replacing
-// it with a derived Context created using [WithCancel], [WithDeadline],
-// [WithTimeout], or [WithValue].
-//
-// Programs that use Contexts should follow these rules to keep interfaces
-// consistent across packages and enable static analysis tools to check context
-// propagation:
-//
-// Do not store Contexts inside a struct type; instead, pass a Context
-// explicitly to each function that needs it. This is discussed further in
-// https://go.dev/blog/context-and-structs. The Context should be the first
-// parameter, typically named ctx:
-//
-// func DoSomething(ctx context.Context, arg Arg) error {
-// // ... use ctx ...
-// }
-//
-// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO]
-// if you are unsure about which Context to use.
-//
-// Use context Values only for request-scoped data that transits processes and
-// APIs, not for passing optional parameters to functions.
+// Package context has been superseded by the standard library [context] package.
//
-// The same Context may be passed to functions running in different goroutines;
-// Contexts are safe for simultaneous use by multiple goroutines.
-//
-// See https://go.dev/blog/context for example code for a server that uses
-// Contexts.
-//
-// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs
+// Deprecated: Use the standard library context package instead.
package context
import (
@@ -51,36 +16,37 @@ import (
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
+//
+//go:fix inline
type Context = context.Context
// Canceled is the error returned by [Context.Err] when the context is canceled
// for some reason other than its deadline passing.
+//
+//go:fix inline
var Canceled = context.Canceled
// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled
// due to its deadline passing.
+//
+//go:fix inline
var DeadlineExceeded = context.DeadlineExceeded
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
-func Background() Context {
- return background
-}
+//
+//go:fix inline
+func Background() Context { return context.Background() }
// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter).
-func TODO() Context {
- return todo
-}
-
-var (
- background = context.Background()
- todo = context.TODO()
-)
+//
+//go:fix inline
+func TODO() Context { return context.TODO() }
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
@@ -95,6 +61,8 @@ type CancelFunc = context.CancelFunc
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this [Context] complete.
+//
+//go:fix inline
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
return context.WithCancel(parent)
}
@@ -108,6 +76,8 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this [Context] complete.
+//
+//go:fix inline
func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
return context.WithDeadline(parent, d)
}
@@ -122,6 +92,8 @@ func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
+//
+//go:fix inline
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return context.WithTimeout(parent, timeout)
}
@@ -139,6 +111,8 @@ func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
// interface{}, context keys often have concrete type
// struct{}. Alternatively, exported context key variables' static
// type should be a pointer or interface.
+//
+//go:fix inline
func WithValue(parent Context, key, val interface{}) Context {
return context.WithValue(parent, key, val)
}
diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go
index 04c6bec210..12f2273706 100644
--- a/vendor/golang.org/x/net/html/escape.go
+++ b/vendor/golang.org/x/net/html/escape.go
@@ -299,7 +299,7 @@ func escape(w writer, s string) error {
case '\r':
esc = "
"
default:
- panic("unrecognized escape character")
+ panic("html: unrecognized escape character")
}
s = s[i+1:]
if _, err := w.WriteString(esc); err != nil {
diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
index 518ee4c94e..88fc0056a3 100644
--- a/vendor/golang.org/x/net/html/parse.go
+++ b/vendor/golang.org/x/net/html/parse.go
@@ -136,7 +136,7 @@ func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int {
return -1
}
default:
- panic("unreachable")
+ panic(fmt.Sprintf("html: internal error: indexOfElementInScope unknown scope: %d", s))
}
}
switch s {
@@ -179,7 +179,7 @@ func (p *parser) clearStackToContext(s scope) {
return
}
default:
- panic("unreachable")
+ panic(fmt.Sprintf("html: internal error: clearStackToContext unknown scope: %d", s))
}
}
}
@@ -231,7 +231,14 @@ func (p *parser) addChild(n *Node) {
}
if n.Type == ElementNode {
- p.oe = append(p.oe, n)
+ p.insertOpenElement(n)
+ }
+}
+
+func (p *parser) insertOpenElement(n *Node) {
+ p.oe = append(p.oe, n)
+ if len(p.oe) > 512 {
+ panic("html: open stack of elements exceeds 512 nodes")
}
}
@@ -810,7 +817,7 @@ func afterHeadIM(p *parser) bool {
p.im = inFramesetIM
return true
case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
- p.oe = append(p.oe, p.head)
+ p.insertOpenElement(p.head)
defer p.oe.remove(p.head)
return inHeadIM(p)
case a.Head:
@@ -1678,7 +1685,7 @@ func inTableBodyIM(p *parser) bool {
return inTableIM(p)
}
-// Section 12.2.6.4.14.
+// Section 13.2.6.4.14.
func inRowIM(p *parser) bool {
switch p.tok.Type {
case StartTagToken:
@@ -1690,7 +1697,9 @@ func inRowIM(p *parser) bool {
p.im = inCellIM
return true
case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead, a.Tr:
- if p.popUntil(tableScope, a.Tr) {
+ if p.elementInScope(tableScope, a.Tr) {
+ p.clearStackToContext(tableRowScope)
+ p.oe.pop()
p.im = inTableBodyIM
return false
}
@@ -1700,22 +1709,28 @@ func inRowIM(p *parser) bool {
case EndTagToken:
switch p.tok.DataAtom {
case a.Tr:
- if p.popUntil(tableScope, a.Tr) {
+ if p.elementInScope(tableScope, a.Tr) {
+ p.clearStackToContext(tableRowScope)
+ p.oe.pop()
p.im = inTableBodyIM
return true
}
// Ignore the token.
return true
case a.Table:
- if p.popUntil(tableScope, a.Tr) {
+ if p.elementInScope(tableScope, a.Tr) {
+ p.clearStackToContext(tableRowScope)
+ p.oe.pop()
p.im = inTableBodyIM
return false
}
// Ignore the token.
return true
case a.Tbody, a.Tfoot, a.Thead:
- if p.elementInScope(tableScope, p.tok.DataAtom) {
- p.parseImpliedToken(EndTagToken, a.Tr, a.Tr.String())
+ if p.elementInScope(tableScope, p.tok.DataAtom) && p.elementInScope(tableScope, a.Tr) {
+ p.clearStackToContext(tableRowScope)
+ p.oe.pop()
+ p.im = inTableBodyIM
return false
}
// Ignore the token.
@@ -2222,16 +2237,20 @@ func parseForeignContent(p *parser) bool {
p.acknowledgeSelfClosingTag()
}
case EndTagToken:
+ if strings.EqualFold(p.oe[len(p.oe)-1].Data, p.tok.Data) {
+ p.oe = p.oe[:len(p.oe)-1]
+ return true
+ }
for i := len(p.oe) - 1; i >= 0; i-- {
- if p.oe[i].Namespace == "" {
- return p.im(p)
- }
if strings.EqualFold(p.oe[i].Data, p.tok.Data) {
p.oe = p.oe[:i]
+ return true
+ }
+ if i > 0 && p.oe[i-1].Namespace == "" {
break
}
}
- return true
+ return p.im(p)
default:
// Ignore the token.
}
@@ -2312,9 +2331,13 @@ func (p *parser) parseCurrentToken() {
}
}
-func (p *parser) parse() error {
+func (p *parser) parse() (err error) {
+ defer func() {
+ if panicErr := recover(); panicErr != nil {
+ err = fmt.Errorf("%s", panicErr)
+ }
+ }()
// Iterate until EOF. Any other error will cause an early return.
- var err error
for err != io.EOF {
// CDATA sections are allowed only in foreign content.
n := p.oe.top()
@@ -2343,6 +2366,8 @@ func (p *parser) parse() error {
// &lt;tag&gt;s. Conversely, explicit &lt;tag&gt;s in r's data can be silently dropped,
// with no corresponding node in the resulting tree.
//
+// Parse will reject HTML that is nested deeper than 512 elements.
+//
// The input is assumed to be UTF-8 encoded.
func Parse(r io.Reader) (*Node, error) {
return ParseWithOptions(r)
diff --git a/vendor/golang.org/x/net/html/render.go b/vendor/golang.org/x/net/html/render.go
index e8c1233455..0157d89e1f 100644
--- a/vendor/golang.org/x/net/html/render.go
+++ b/vendor/golang.org/x/net/html/render.go
@@ -184,7 +184,7 @@ func render1(w writer, n *Node) error {
return err
}
- // Add initial newline where there is danger of a newline beging ignored.
+ // Add initial newline where there is danger of a newline being ignored.
if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
switch n.Data {
case "pre", "listing", "textarea":
diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go
index ca645d9a1a..8a7a89d016 100644
--- a/vendor/golang.org/x/net/http2/config.go
+++ b/vendor/golang.org/x/net/http2/config.go
@@ -27,6 +27,7 @@ import (
// - If the resulting value is zero or out of range, use a default.
type http2Config struct {
MaxConcurrentStreams uint32
+ StrictMaxConcurrentRequests bool
MaxDecoderHeaderTableSize uint32
MaxEncoderHeaderTableSize uint32
MaxReadFrameSize uint32
@@ -55,7 +56,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
CountError: h2.CountError,
}
- fillNetHTTPServerConfig(&conf, h1)
+ fillNetHTTPConfig(&conf, h1.HTTP2)
setConfigDefaults(&conf, true)
return conf
}
@@ -64,12 +65,13 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
// (the net/http Transport).
func configFromTransport(h2 *Transport) http2Config {
conf := http2Config{
- MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
- MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
- MaxReadFrameSize: h2.MaxReadFrameSize,
- SendPingTimeout: h2.ReadIdleTimeout,
- PingTimeout: h2.PingTimeout,
- WriteByteTimeout: h2.WriteByteTimeout,
+ StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams,
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
}
// Unlike most config fields, where out-of-range values revert to the default,
@@ -81,7 +83,7 @@ func configFromTransport(h2 *Transport) http2Config {
}
if h2.t1 != nil {
- fillNetHTTPTransportConfig(&conf, h2.t1)
+ fillNetHTTPConfig(&conf, h2.t1.HTTP2)
}
setConfigDefaults(&conf, false)
return conf
@@ -120,3 +122,48 @@ func adjustHTTP1MaxHeaderSize(n int64) int64 {
const typicalHeaders = 10 // conservative
return n + typicalHeaders*perFieldOverhead
}
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+ if h2 == nil {
+ return
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if http2ConfigStrictMaxConcurrentRequests(h2) {
+ conf.StrictMaxConcurrentRequests = true
+ }
+ if h2.MaxEncoderHeaderTableSize != 0 {
+ conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+ }
+ if h2.MaxDecoderHeaderTableSize != 0 {
+ conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if h2.MaxReadFrameSize != 0 {
+ conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+ }
+ if h2.MaxReceiveBufferPerConnection != 0 {
+ conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+ }
+ if h2.MaxReceiveBufferPerStream != 0 {
+ conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+ }
+ if h2.SendPingTimeout != 0 {
+ conf.SendPingTimeout = h2.SendPingTimeout
+ }
+ if h2.PingTimeout != 0 {
+ conf.PingTimeout = h2.PingTimeout
+ }
+ if h2.WriteByteTimeout != 0 {
+ conf.WriteByteTimeout = h2.WriteByteTimeout
+ }
+ if h2.PermitProhibitedCipherSuites {
+ conf.PermitProhibitedCipherSuites = true
+ }
+ if h2.CountError != nil {
+ conf.CountError = h2.CountError
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go
deleted file mode 100644
index 5b516c55ff..0000000000
--- a/vendor/golang.org/x/net/http2/config_go124.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.24
-
-package http2
-
-import "net/http"
-
-// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
- fillNetHTTPConfig(conf, srv.HTTP2)
-}
-
-// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
- fillNetHTTPConfig(conf, tr.HTTP2)
-}
-
-func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
- if h2 == nil {
- return
- }
- if h2.MaxConcurrentStreams != 0 {
- conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
- }
- if h2.MaxEncoderHeaderTableSize != 0 {
- conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
- }
- if h2.MaxDecoderHeaderTableSize != 0 {
- conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
- }
- if h2.MaxConcurrentStreams != 0 {
- conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
- }
- if h2.MaxReadFrameSize != 0 {
- conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
- }
- if h2.MaxReceiveBufferPerConnection != 0 {
- conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
- }
- if h2.MaxReceiveBufferPerStream != 0 {
- conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
- }
- if h2.SendPingTimeout != 0 {
- conf.SendPingTimeout = h2.SendPingTimeout
- }
- if h2.PingTimeout != 0 {
- conf.PingTimeout = h2.PingTimeout
- }
- if h2.WriteByteTimeout != 0 {
- conf.WriteByteTimeout = h2.WriteByteTimeout
- }
- if h2.PermitProhibitedCipherSuites {
- conf.PermitProhibitedCipherSuites = true
- }
- if h2.CountError != nil {
- conf.CountError = h2.CountError
- }
-}
diff --git a/vendor/golang.org/x/net/http2/config_go125.go b/vendor/golang.org/x/net/http2/config_go125.go
new file mode 100644
index 0000000000..b4373fe33c
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go125.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.26
+
+package http2
+
+import (
+ "net/http"
+)
+
+func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
+ return false
+}
diff --git a/vendor/golang.org/x/net/http2/config_go126.go b/vendor/golang.org/x/net/http2/config_go126.go
new file mode 100644
index 0000000000..6b071c149d
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go126.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.26
+
+package http2
+
+import (
+ "net/http"
+)
+
+func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
+ return h2.StrictMaxConcurrentRequests
+}
diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go
deleted file mode 100644
index 060fd6c64c..0000000000
--- a/vendor/golang.org/x/net/http2/config_pre_go124.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.24
-
-package http2
-
-import "net/http"
-
-// Pre-Go 1.24 fallback.
-// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
-
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
-
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index db3264da8c..9a4bd123c9 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -280,6 +280,8 @@ type Framer struct {
// lastHeaderStream is non-zero if the last frame was an
// unfinished HEADERS/CONTINUATION.
lastHeaderStream uint32
+ // lastFrameType holds the type of the last frame for verifying frame order.
+ lastFrameType FrameType
maxReadSize uint32
headerBuf [frameHeaderLen]byte
@@ -347,7 +349,7 @@ func (fr *Framer) maxHeaderListSize() uint32 {
func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
// Write the FrameHeader.
f.wbuf = append(f.wbuf[:0],
- 0, // 3 bytes of length, filled in in endWrite
+ 0, // 3 bytes of length, filled in endWrite
0,
0,
byte(ftype),
@@ -488,30 +490,41 @@ func terminalReadFrameError(err error) bool {
return err != nil
}
-// ReadFrame reads a single frame. The returned Frame is only valid
-// until the next call to ReadFrame.
+// ReadFrameHeader reads the header of the next frame.
+// It reads the 9-byte fixed frame header, and does not read any portion of the
+// frame payload. The caller is responsible for consuming the payload, either
+// with ReadFrameForHeader or directly from the Framer's io.Reader.
//
-// If the frame is larger than previously set with SetMaxReadFrameSize, the
-// returned error is ErrFrameTooLarge. Other errors may be of type
-// ConnectionError, StreamError, or anything else from the underlying
-// reader.
+// If the frame is larger than previously set with SetMaxReadFrameSize, it
+// returns the frame header and ErrFrameTooLarge.
//
-// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
-// indicates the stream responsible for the error.
-func (fr *Framer) ReadFrame() (Frame, error) {
+// If the returned FrameHeader.StreamID is non-zero, it indicates the stream
+// responsible for the error.
+func (fr *Framer) ReadFrameHeader() (FrameHeader, error) {
fr.errDetail = nil
- if fr.lastFrame != nil {
- fr.lastFrame.invalidate()
- }
fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
if err != nil {
- return nil, err
+ return fh, err
}
if fh.Length > fr.maxReadSize {
if fh == invalidHTTP1LookingFrameHeader() {
- return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge)
+ return fh, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge)
}
- return nil, ErrFrameTooLarge
+ return fh, ErrFrameTooLarge
+ }
+ if err := fr.checkFrameOrder(fh); err != nil {
+ return fh, err
+ }
+ return fh, nil
+}
+
+// ReadFrameForHeader reads the payload for the frame with the given FrameHeader.
+//
+// It behaves identically to ReadFrame, other than not checking the maximum
+// frame size.
+func (fr *Framer) ReadFrameForHeader(fh FrameHeader) (Frame, error) {
+ if fr.lastFrame != nil {
+ fr.lastFrame.invalidate()
}
payload := fr.getReadBuf(fh.Length)
if _, err := io.ReadFull(fr.r, payload); err != nil {
@@ -527,9 +540,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
}
return nil, err
}
- if err := fr.checkFrameOrder(f); err != nil {
- return nil, err
- }
+ fr.lastFrame = f
if fr.logReads {
fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
}
@@ -539,6 +550,24 @@ func (fr *Framer) ReadFrame() (Frame, error) {
return f, nil
}
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame or ReadFrameBodyForHeader.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+//
+// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
+// indicates the stream responsible for the error.
+func (fr *Framer) ReadFrame() (Frame, error) {
+ fh, err := fr.ReadFrameHeader()
+ if err != nil {
+ return nil, err
+ }
+ return fr.ReadFrameForHeader(fh)
+}
+
// connError returns ConnectionError(code) but first
// stashes away a public reason to the caller can optionally relay it
// to the peer before hanging up on them. This might help others debug
@@ -551,20 +580,19 @@ func (fr *Framer) connError(code ErrCode, reason string) error {
// checkFrameOrder reports an error if f is an invalid frame to return
// next from ReadFrame. Mostly it checks whether HEADERS and
// CONTINUATION frames are contiguous.
-func (fr *Framer) checkFrameOrder(f Frame) error {
- last := fr.lastFrame
- fr.lastFrame = f
+func (fr *Framer) checkFrameOrder(fh FrameHeader) error {
+ lastType := fr.lastFrameType
+ fr.lastFrameType = fh.Type
if fr.AllowIllegalReads {
return nil
}
- fh := f.Header()
if fr.lastHeaderStream != 0 {
if fh.Type != FrameContinuation {
return fr.connError(ErrCodeProtocol,
fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
fh.Type, fh.StreamID,
- last.Header().Type, fr.lastHeaderStream))
+ lastType, fr.lastHeaderStream))
}
if fh.StreamID != fr.lastHeaderStream {
return fr.connError(ErrCodeProtocol,
@@ -1152,7 +1180,16 @@ type PriorityFrame struct {
PriorityParam
}
-// PriorityParam are the stream prioritzation parameters.
+var defaultRFC9218Priority = PriorityParam{
+ incremental: 0,
+ urgency: 3,
+}
+
+// Note that HTTP/2 has had two different prioritization schemes, and
+// PriorityParam struct below is a superset of both schemes. The exported
+// symbols are from RFC 7540 and the non-exported ones are from RFC 9218.
+
+// PriorityParam are the stream prioritization parameters.
type PriorityParam struct {
// StreamDep is a 31-bit stream identifier for the
// stream that this stream depends on. Zero means no
@@ -1167,6 +1204,20 @@ type PriorityParam struct {
// the spec, "Add one to the value to obtain a weight between
// 1 and 256."
Weight uint8
+
+ // "The urgency (u) parameter value is Integer (see Section 3.3.1 of
+ // [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of
+ // priority. The default is 3."
+ urgency uint8
+
+ // "The incremental (i) parameter value is Boolean (see Section 3.3.6 of
+ // [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed
+ // incrementally, i.e., provide some meaningful output as chunks of the
+ // response arrive."
+ //
+ // We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can
+ // avoid unnecessary type conversions and because either type takes 1 byte.
+ incremental uint8
}
func (p PriorityParam) IsZero() bool {
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go
index 9933c9f8c7..9921ca096d 100644
--- a/vendor/golang.org/x/net/http2/gotrack.go
+++ b/vendor/golang.org/x/net/http2/gotrack.go
@@ -15,21 +15,32 @@ import (
"runtime"
"strconv"
"sync"
+ "sync/atomic"
)
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+// Setting DebugGoroutines to false during a test to disable goroutine debugging
+// results in race detector complaints when a test leaves goroutines running before
+// returning. Tests shouldn't do this, of course, but when they do it generally shows
+// up as infrequent, hard-to-debug flakes. (See #66519.)
+//
+// Disable goroutine debugging during individual tests with an atomic bool.
+// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition
+// here is harmless.)
+var disableDebugGoroutines atomic.Bool
+
type goroutineLock uint64
func newGoroutineLock() goroutineLock {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return 0
}
return goroutineLock(curGoroutineID())
}
func (g goroutineLock) check() {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return
}
if curGoroutineID() != uint64(g) {
@@ -38,7 +49,7 @@ func (g goroutineLock) check() {
}
func (g goroutineLock) checkNotOn() {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return
}
if curGoroutineID() == uint64(g) {
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 6c18ea230b..105fe12fef 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -11,13 +11,10 @@
// requires Go 1.6 or later)
//
// See https://http2.github.io/ for more information on HTTP/2.
-//
-// See https://http2.golang.org/ for a test server running this code.
package http2 // import "golang.org/x/net/http2"
import (
"bufio"
- "context"
"crypto/tls"
"errors"
"fmt"
@@ -37,7 +34,6 @@ var (
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
- inTests bool
// Enabling extended CONNECT by causes browsers to attempt to use
// WebSockets-over-HTTP/2. This results in problems when the server's websocket
@@ -257,15 +253,13 @@ func (cw closeWaiter) Wait() {
// idle memory usage with many connections.
type bufferedWriter struct {
_ incomparable
- group synctestGroupInterface // immutable
- conn net.Conn // immutable
- bw *bufio.Writer // non-nil when data is buffered
- byteTimeout time.Duration // immutable, WriteByteTimeout
+ conn net.Conn // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+ byteTimeout time.Duration // immutable, WriteByteTimeout
}
-func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
+func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter {
return &bufferedWriter{
- group: group,
conn: conn,
byteTimeout: timeout,
}
@@ -316,24 +310,18 @@ func (w *bufferedWriter) Flush() error {
type bufferedWriterTimeoutWriter bufferedWriter
func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
- return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
+ return writeWithByteTimeout(w.conn, w.byteTimeout, p)
}
// writeWithByteTimeout writes to conn.
// If more than timeout passes without any bytes being written to the connection,
// the write fails.
-func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
+func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
if timeout <= 0 {
return conn.Write(p)
}
for {
- var now time.Time
- if group == nil {
- now = time.Now()
- } else {
- now = group.Now()
- }
- conn.SetWriteDeadline(now.Add(timeout))
+ conn.SetWriteDeadline(time.Now().Add(timeout))
nn, err := conn.Write(p[n:])
n += nn
if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
@@ -419,14 +407,3 @@ func (s *sorter) SortStrings(ss []string) {
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).
type incomparable [0]func()
-
-// synctestGroupInterface is the methods of synctestGroup used by Server and Transport.
-// It's defined as an interface here to let us keep synctestGroup entirely test-only
-// and not a part of non-test builds.
-type synctestGroupInterface interface {
- Join()
- Now() time.Time
- NewTimer(d time.Duration) timer
- AfterFunc(d time.Duration, f func()) timer
- ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc)
-}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 51fca38f61..bdc5520ebd 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -176,44 +176,15 @@ type Server struct {
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
state *serverInternalState
-
- // Synchronization group used for testing.
- // Outside of tests, this is nil.
- group synctestGroupInterface
-}
-
-func (s *Server) markNewGoroutine() {
- if s.group != nil {
- s.group.Join()
- }
-}
-
-func (s *Server) now() time.Time {
- if s.group != nil {
- return s.group.Now()
- }
- return time.Now()
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (s *Server) newTimer(d time.Duration) timer {
- if s.group != nil {
- return s.group.NewTimer(d)
- }
- return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (s *Server) afterFunc(d time.Duration, f func()) timer {
- if s.group != nil {
- return s.group.AfterFunc(d, f)
- }
- return timeTimer{time.AfterFunc(d, f)}
}
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
+
+ // Pool of error channels. This is per-Server rather than global
+ // because channels can't be reused across synctest bubbles.
+ errChanPool sync.Pool
}
func (s *serverInternalState) registerConn(sc *serverConn) {
@@ -245,6 +216,27 @@ func (s *serverInternalState) startGracefulShutdown() {
s.mu.Unlock()
}
+// Global error channel pool used for uninitialized Servers.
+// We use a per-Server pool when possible to avoid using channels across synctest bubbles.
+var errChanPool = sync.Pool{
+ New: func() any { return make(chan error, 1) },
+}
+
+func (s *serverInternalState) getErrChan() chan error {
+ if s == nil {
+ return errChanPool.Get().(chan error) // Server used without calling ConfigureServer
+ }
+ return s.errChanPool.Get().(chan error)
+}
+
+func (s *serverInternalState) putErrChan(ch chan error) {
+ if s == nil {
+ errChanPool.Put(ch) // Server used without calling ConfigureServer
+ return
+ }
+ s.errChanPool.Put(ch)
+}
+
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
@@ -257,7 +249,10 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if conf == nil {
conf = new(Server)
}
- conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
+ conf.state = &serverInternalState{
+ activeConns: make(map[*serverConn]struct{}),
+ errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }},
+ }
if h1, h2 := s, conf; h2.IdleTimeout == 0 {
if h1.IdleTimeout != 0 {
h2.IdleTimeout = h1.IdleTimeout
@@ -423,6 +418,9 @@ func (o *ServeConnOpts) handler() http.Handler {
//
// The opts parameter is optional. If nil, default values are used.
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ if opts == nil {
+ opts = &ServeConnOpts{}
+ }
s.serveConn(c, opts, nil)
}
@@ -438,7 +436,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout),
+ bw: newBufferedWriter(c, conf.WriteByteTimeout),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
@@ -638,11 +636,11 @@ type serverConn struct {
pingSent bool
sentPingData [8]byte
goAwayCode ErrCode
- shutdownTimer timer // nil until used
- idleTimer timer // nil if unused
+ shutdownTimer *time.Timer // nil until used
+ idleTimer *time.Timer // nil if unused
readIdleTimeout time.Duration
pingTimeout time.Duration
- readIdleTimer timer // nil if unused
+ readIdleTimer *time.Timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@@ -687,12 +685,12 @@ type stream struct {
flow outflow // limits writing from Handler to client
inflow inflow // what the client is allowed to POST/etc to us
state streamState
- resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
- gotTrailerHeader bool // HEADER frame for trailers was seen
- wroteHeaders bool // whether we wrote headers (not status 100)
- readDeadline timer // nil if unused
- writeDeadline timer // nil if unused
- closeErr error // set before cw is closed
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ readDeadline *time.Timer // nil if unused
+ writeDeadline *time.Timer // nil if unused
+ closeErr error // set before cw is closed
trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer
@@ -848,7 +846,6 @@ type readFrameResult struct {
// consumer is done with the frame.
// It's run on its own goroutine.
func (sc *serverConn) readFrames() {
- sc.srv.markNewGoroutine()
gate := make(chan struct{})
gateDone := func() { gate <- struct{}{} }
for {
@@ -881,7 +878,6 @@ type frameWriteResult struct {
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
- sc.srv.markNewGoroutine()
var err error
if wd == nil {
err = wr.write.writeFrame(sc)
@@ -965,22 +961,22 @@ func (sc *serverConn) serve(conf http2Config) {
sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout > 0 {
- sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+ sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
}
if conf.SendPingTimeout > 0 {
sc.readIdleTimeout = conf.SendPingTimeout
- sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+ sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
defer sc.readIdleTimer.Stop()
}
go sc.readFrames() // closed by defer sc.conn.Close above
- settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
+ settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
- lastFrameTime := sc.srv.now()
+ lastFrameTime := time.Now()
loopNum := 0
for {
loopNum++
@@ -994,7 +990,7 @@ func (sc *serverConn) serve(conf http2Config) {
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
- lastFrameTime = sc.srv.now()
+ lastFrameTime = time.Now()
// Process any written frames before reading new frames from the client since a
// written frame could have triggered a new stream to be started.
if sc.writingFrameAsync {
@@ -1077,7 +1073,7 @@ func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
}
pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
- now := sc.srv.now()
+ now := time.Now()
if pingAt.After(now) {
// We received frames since arming the ping timer.
// Reset it for the next possible timeout.
@@ -1141,10 +1137,10 @@ func (sc *serverConn) readPreface() error {
errc <- nil
}
}()
- timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
+ timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
defer timer.Stop()
select {
- case <-timer.C():
+ case <-timer.C:
return errPrefaceTimeout
case err := <-errc:
if err == nil {
@@ -1156,10 +1152,6 @@ func (sc *serverConn) readPreface() error {
}
}
-var errChanPool = sync.Pool{
- New: func() interface{} { return make(chan error, 1) },
-}
-
var writeDataPool = sync.Pool{
New: func() interface{} { return new(writeData) },
}
@@ -1167,7 +1159,7 @@ var writeDataPool = sync.Pool{
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
- ch := errChanPool.Get().(chan error)
+ ch := sc.srv.state.getErrChan()
writeArg := writeDataPool.Get().(*writeData)
*writeArg = writeData{stream.id, data, endStream}
err := sc.writeFrameFromHandler(FrameWriteRequest{
@@ -1199,7 +1191,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea
return errStreamClosed
}
}
- errChanPool.Put(ch)
+ sc.srv.state.putErrChan(ch)
if frameWriteDone {
writeDataPool.Put(writeArg)
}
@@ -1513,7 +1505,7 @@ func (sc *serverConn) goAway(code ErrCode) {
func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check()
- sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
+ sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
func (sc *serverConn) resetStream(se StreamError) {
@@ -2118,7 +2110,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout > 0 {
sc.conn.SetReadDeadline(time.Time{})
- st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+ st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
return sc.scheduleHandler(id, rw, req, handler)
@@ -2216,7 +2208,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.init(sc.initialStreamRecvWindowSize)
if sc.hs.WriteTimeout > 0 {
- st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+ st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
sc.streams[id] = st
@@ -2405,7 +2397,6 @@ func (sc *serverConn) handlerDone() {
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
- sc.srv.markNewGoroutine()
defer sc.sendServeMsg(handlerDoneMsg)
didPanic := true
defer func() {
@@ -2454,7 +2445,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
// waiting for this frame to be written, so an http.Flush mid-handler
// writes out the correct value of keys, before a handler later potentially
// mutates it.
- errc = errChanPool.Get().(chan error)
+ errc = sc.srv.state.getErrChan()
}
if err := sc.writeFrameFromHandler(FrameWriteRequest{
write: headerData,
@@ -2466,7 +2457,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
if errc != nil {
select {
case err := <-errc:
- errChanPool.Put(errc)
+ sc.srv.state.putErrChan(errc)
return err
case <-sc.doneServing:
return errClientDisconnected
@@ -2573,7 +2564,7 @@ func (b *requestBody) Read(p []byte) (n int, err error) {
if err == io.EOF {
b.sawEOF = true
}
- if b.conn == nil && inTests {
+ if b.conn == nil {
return
}
b.conn.noteBodyReadFromHandler(b.stream, n, err)
@@ -2702,7 +2693,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
var date string
if _, ok := rws.snapHeader["Date"]; !ok {
// TODO(bradfitz): be faster here, like net/http? measure.
- date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
+ date = time.Now().UTC().Format(http.TimeFormat)
}
for _, v := range rws.snapHeader["Trailer"] {
@@ -2824,7 +2815,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() {
func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onReadTimeout()
@@ -2840,9 +2831,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
if deadline.IsZero() {
st.readDeadline = nil
} else if st.readDeadline == nil {
- st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
+ st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
} else {
- st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
+ st.readDeadline.Reset(deadline.Sub(time.Now()))
}
})
return nil
@@ -2850,7 +2841,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onWriteTimeout()
@@ -2866,9 +2857,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
if deadline.IsZero() {
st.writeDeadline = nil
} else if st.writeDeadline == nil {
- st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
+ st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
} else {
- st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
+ st.writeDeadline.Reset(deadline.Sub(time.Now()))
}
})
return nil
@@ -3147,7 +3138,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
method: opts.Method,
url: u,
header: cloneHeader(opts.Header),
- done: errChanPool.Get().(chan error),
+ done: sc.srv.state.getErrChan(),
}
select {
@@ -3164,7 +3155,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
case <-st.cw:
return errStreamClosed
case err := <-msg.done:
- errChanPool.Put(msg.done)
+ sc.srv.state.putErrChan(msg.done)
return err
}
}
diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go
deleted file mode 100644
index 0b1c17b812..0000000000
--- a/vendor/golang.org/x/net/http2/timer.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package http2
-
-import "time"
-
-// A timer is a time.Timer, as an interface which can be replaced in tests.
-type timer = interface {
- C() <-chan time.Time
- Reset(d time.Duration) bool
- Stop() bool
-}
-
-// timeTimer adapts a time.Timer to the timer interface.
-type timeTimer struct {
- *time.Timer
-}
-
-func (t timeTimer) C() <-chan time.Time { return t.Timer.C }
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index f26356b9cd..1965913e54 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -9,6 +9,7 @@ package http2
import (
"bufio"
"bytes"
+ "compress/flate"
"compress/gzip"
"context"
"crypto/rand"
@@ -193,50 +194,6 @@ type Transport struct {
type transportTestHooks struct {
newclientconn func(*ClientConn)
- group synctestGroupInterface
-}
-
-func (t *Transport) markNewGoroutine() {
- if t != nil && t.transportTestHooks != nil {
- t.transportTestHooks.group.Join()
- }
-}
-
-func (t *Transport) now() time.Time {
- if t != nil && t.transportTestHooks != nil {
- return t.transportTestHooks.group.Now()
- }
- return time.Now()
-}
-
-func (t *Transport) timeSince(when time.Time) time.Duration {
- if t != nil && t.transportTestHooks != nil {
- return t.now().Sub(when)
- }
- return time.Since(when)
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (t *Transport) newTimer(d time.Duration) timer {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.NewTimer(d)
- }
- return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (t *Transport) afterFunc(d time.Duration, f func()) timer {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.AfterFunc(d, f)
- }
- return timeTimer{time.AfterFunc(d, f)}
-}
-
-func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.ContextWithTimeout(ctx, d)
- }
- return context.WithTimeout(ctx, d)
}
func (t *Transport) maxHeaderListSize() uint32 {
@@ -366,7 +323,7 @@ type ClientConn struct {
readerErr error // set before readerDone is closed
idleTimeout time.Duration // or 0 for never
- idleTimer timer
+ idleTimer *time.Timer
mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes
@@ -399,6 +356,7 @@ type ClientConn struct {
readIdleTimeout time.Duration
pingTimeout time.Duration
extendedConnectAllowed bool
+ strictMaxConcurrentStreams bool
// rstStreamPingsBlocked works around an unfortunate gRPC behavior.
// gRPC strictly limits the number of PING frames that it will receive.
@@ -534,14 +492,12 @@ func (cs *clientStream) closeReqBodyLocked() {
cs.reqBodyClosed = make(chan struct{})
reqBodyClosed := cs.reqBodyClosed
go func() {
- cs.cc.t.markNewGoroutine()
cs.reqBody.Close()
close(reqBodyClosed)
}()
}
type stickyErrWriter struct {
- group synctestGroupInterface
conn net.Conn
timeout time.Duration
err *error
@@ -551,7 +507,7 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
if *sew.err != nil {
return 0, *sew.err
}
- n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
+ n, err = writeWithByteTimeout(sew.conn, sew.timeout, p)
*sew.err = err
return n, err
}
@@ -650,9 +606,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
backoff := float64(uint(1) << (uint(retry) - 1))
backoff += backoff * (0.1 * mathrand.Float64())
d := time.Second * time.Duration(backoff)
- tm := t.newTimer(d)
+ tm := time.NewTimer(d)
select {
- case <-tm.C():
+ case <-tm.C:
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
case <-req.Context().Done():
@@ -699,6 +655,7 @@ var (
errClientConnUnusable = errors.New("http2: client conn not usable")
errClientConnNotEstablished = errors.New("http2: client conn could not be established")
errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+ errClientConnForceClosed = errors.New("http2: client connection force closed via ClientConn.Close")
)
// shouldRetryRequest is called by RoundTrip when a request fails to get
@@ -829,7 +786,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
initialWindowSize: 65535, // spec default
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
- peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+ strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests,
+ peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
streams: make(map[uint32]*clientStream),
singleUse: singleUse,
seenSettingsChan: make(chan struct{}),
@@ -838,14 +796,11 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
pingTimeout: conf.PingTimeout,
pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1),
- lastActive: t.now(),
+ lastActive: time.Now(),
}
- var group synctestGroupInterface
if t.transportTestHooks != nil {
- t.markNewGoroutine()
t.transportTestHooks.newclientconn(cc)
c = cc.tconn
- group = t.group
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -857,7 +812,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding.
cc.bw = bufio.NewWriter(stickyErrWriter{
- group: group,
conn: c,
timeout: conf.WriteByteTimeout,
err: &cc.werr,
@@ -906,7 +860,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// Start the idle timer after the connection is fully initialized.
if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d
- cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout)
+ cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
}
go cc.readLoop()
@@ -917,7 +871,7 @@ func (cc *ClientConn) healthCheck() {
pingTimeout := cc.pingTimeout
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received.
- ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
+ ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
defer cancel()
cc.vlogf("http2: Transport sending health check")
err := cc.Ping(ctx)
@@ -1067,7 +1021,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
return
}
var maxConcurrentOkay bool
- if cc.t.StrictMaxConcurrentStreams {
+ if cc.strictMaxConcurrentStreams {
// We'll tell the caller we can take a new request to
// prevent the caller from dialing a new TCP
// connection, but then we'll block later before
@@ -1120,7 +1074,7 @@ func (cc *ClientConn) tooIdleLocked() bool {
// times are compared based on their wall time. We don't want
// to reuse a connection that's been sitting idle during
// VM/laptop suspend if monotonic time was also frozen.
- return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout
+ return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
}
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -1186,7 +1140,6 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
done := make(chan struct{})
cancelled := false // guarded by cc.mu
go func() {
- cc.t.markNewGoroutine()
cc.mu.Lock()
defer cc.mu.Unlock()
for {
@@ -1257,8 +1210,7 @@ func (cc *ClientConn) closeForError(err error) {
//
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
func (cc *ClientConn) Close() error {
- err := errors.New("http2: client connection force closed via ClientConn.Close")
- cc.closeForError(err)
+ cc.closeForError(errClientConnForceClosed)
return nil
}
@@ -1427,7 +1379,6 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
//
// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) {
- cs.cc.t.markNewGoroutine()
err := cs.writeRequest(req, streamf)
cs.cleanupWriteRequest(err)
}
@@ -1558,9 +1509,9 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
var respHeaderTimer <-chan time.Time
var respHeaderRecv chan struct{}
if d := cc.responseHeaderTimeout(); d != 0 {
- timer := cc.t.newTimer(d)
+ timer := time.NewTimer(d)
defer timer.Stop()
- respHeaderTimer = timer.C()
+ respHeaderTimer = timer.C
respHeaderRecv = cs.respHeaderRecv
}
// Wait until the peer half-closes its end of the stream,
@@ -1753,7 +1704,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
// Return a fatal error which aborts the retry loop.
return errClientConnNotEstablished
}
- cc.lastActive = cc.t.now()
+ cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() {
return errClientConnUnusable
}
@@ -2092,10 +2043,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
if len(cc.streams) != slen-1 {
panic("forgetting unknown stream id")
}
- cc.lastActive = cc.t.now()
+ cc.lastActive = time.Now()
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
- cc.lastIdle = cc.t.now()
+ cc.lastIdle = time.Now()
}
// Wake up writeRequestBody via clientStream.awaitFlowControl and
// wake up RoundTrip if there is a pending request.
@@ -2121,7 +2072,6 @@ type clientConnReadLoop struct {
// readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() {
- cc.t.markNewGoroutine()
rl := &clientConnReadLoop{cc: cc}
defer rl.cleanup()
cc.readerErr = rl.run()
@@ -2188,9 +2138,9 @@ func (rl *clientConnReadLoop) cleanup() {
if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
unusedWaitTime = cc.idleTimeout
}
- idleTime := cc.t.now().Sub(cc.lastActive)
+ idleTime := time.Now().Sub(cc.lastActive)
if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
- cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
+ cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() {
cc.t.connPool().MarkDead(cc)
})
} else {
@@ -2250,9 +2200,9 @@ func (rl *clientConnReadLoop) run() error {
cc := rl.cc
gotSettings := false
readIdleTimeout := cc.readIdleTimeout
- var t timer
+ var t *time.Timer
if readIdleTimeout != 0 {
- t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
+ t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
}
for {
f, err := cc.fr.ReadFrame()
@@ -2998,7 +2948,6 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
var pingError error
errc := make(chan struct{})
go func() {
- cc.t.markNewGoroutine()
cc.wmu.Lock()
defer cc.wmu.Unlock()
if pingError = cc.fr.WritePing(false, p); pingError != nil {
@@ -3128,35 +3077,102 @@ type erringRoundTripper struct{ err error }
func (rt erringRoundTripper) RoundTripErr() error { return rt.err }
func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
+var errConcurrentReadOnResBody = errors.New("http2: concurrent read on response body")
+
// gzipReader wraps a response body so it can lazily
-// call gzip.NewReader on the first call to Read
+// get gzip.Reader from the pool on the first call to Read.
+// After Close is called it puts gzip.Reader to the pool immediately
+// if there is no Read in progress or later when Read completes.
type gzipReader struct {
_ incomparable
body io.ReadCloser // underlying Response.Body
- zr *gzip.Reader // lazily-initialized gzip reader
- zerr error // sticky error
+ mu sync.Mutex // guards zr and zerr
+ zr *gzip.Reader // stores gzip reader from the pool between reads
+ zerr error // sticky gzip reader init error or sentinel value to detect concurrent read and read after close
}
-func (gz *gzipReader) Read(p []byte) (n int, err error) {
+type eofReader struct{}
+
+func (eofReader) Read([]byte) (int, error) { return 0, io.EOF }
+func (eofReader) ReadByte() (byte, error) { return 0, io.EOF }
+
+var gzipPool = sync.Pool{New: func() any { return new(gzip.Reader) }}
+
+// gzipPoolGet gets a gzip.Reader from the pool and resets it to read from r.
+func gzipPoolGet(r io.Reader) (*gzip.Reader, error) {
+ zr := gzipPool.Get().(*gzip.Reader)
+ if err := zr.Reset(r); err != nil {
+ gzipPoolPut(zr)
+ return nil, err
+ }
+ return zr, nil
+}
+
+// gzipPoolPut puts a gzip.Reader back into the pool.
+func gzipPoolPut(zr *gzip.Reader) {
+ // Reset will allocate bufio.Reader if we pass it anything
+ // other than a flate.Reader, so ensure that it's getting one.
+ var r flate.Reader = eofReader{}
+ zr.Reset(r)
+ gzipPool.Put(zr)
+}
+
+// acquire returns a gzip.Reader for reading response body.
+// The reader must be released after use.
+func (gz *gzipReader) acquire() (*gzip.Reader, error) {
+ gz.mu.Lock()
+ defer gz.mu.Unlock()
if gz.zerr != nil {
- return 0, gz.zerr
+ return nil, gz.zerr
}
if gz.zr == nil {
- gz.zr, err = gzip.NewReader(gz.body)
- if err != nil {
- gz.zerr = err
- return 0, err
+ gz.zr, gz.zerr = gzipPoolGet(gz.body)
+ if gz.zerr != nil {
+ return nil, gz.zerr
}
}
- return gz.zr.Read(p)
+ ret := gz.zr
+ gz.zr, gz.zerr = nil, errConcurrentReadOnResBody
+ return ret, nil
}
-func (gz *gzipReader) Close() error {
- if err := gz.body.Close(); err != nil {
- return err
+// release returns the gzip.Reader to the pool if Close was called during Read.
+func (gz *gzipReader) release(zr *gzip.Reader) {
+ gz.mu.Lock()
+ defer gz.mu.Unlock()
+ if gz.zerr == errConcurrentReadOnResBody {
+ gz.zr, gz.zerr = zr, nil
+ } else { // fs.ErrClosed
+ gzipPoolPut(zr)
+ }
+}
+
+// close returns the gzip.Reader to the pool immediately or
+// signals release to do so after Read completes.
+func (gz *gzipReader) close() {
+ gz.mu.Lock()
+ defer gz.mu.Unlock()
+ if gz.zerr == nil && gz.zr != nil {
+ gzipPoolPut(gz.zr)
+ gz.zr = nil
}
gz.zerr = fs.ErrClosed
- return nil
+}
+
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+ zr, err := gz.acquire()
+ if err != nil {
+ return 0, err
+ }
+ defer gz.release(zr)
+
+ return zr.Read(p)
+}
+
+func (gz *gzipReader) Close() error {
+ gz.close()
+
+ return gz.body.Close()
}
type errorReader struct{ err error }
@@ -3228,7 +3244,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
cc.mu.Lock()
ci.WasIdle = len(cc.streams) == 0 && reused
if ci.WasIdle && !cc.lastActive.IsZero() {
- ci.IdleTime = cc.t.timeSince(cc.lastActive)
+ ci.IdleTime = time.Since(cc.lastActive)
}
cc.mu.Unlock()
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
index cc893adc29..7de27be525 100644
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -42,6 +42,8 @@ type OpenStreamOptions struct {
// PusherID is zero if the stream was initiated by the client. Otherwise,
// PusherID names the stream that pushed the newly opened stream.
PusherID uint32
+ // priority is used to set the priority of the newly opened stream.
+ priority PriorityParam
}
// FrameWriteRequest is a request to write a frame.
@@ -183,45 +185,75 @@ func (wr *FrameWriteRequest) replyToWriter(err error) {
}
// writeQueue is used by implementations of WriteScheduler.
+//
+// Each writeQueue contains a queue of FrameWriteRequests, meant to store all
+// FrameWriteRequests associated with a given stream. This is implemented as a
+// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done
+// by incrementing currPos of currQueue. Adding an item is done by appending it
+// to the nextQueue. If currQueue is empty when trying to remove an item, we
+// can swap currQueue and nextQueue to remedy the situation.
+// This two-stage queue is analogous to the use of two lists in Okasaki's
+// purely functional queue but without the overhead of reversing the list when
+// swapping stages.
+//
+// writeQueue also contains prev and next, this can be used by implementations
+// of WriteScheduler to construct data structures that represent the order of
+// writing between different streams (e.g. circular linked list).
type writeQueue struct {
- s []FrameWriteRequest
+ currQueue []FrameWriteRequest
+ nextQueue []FrameWriteRequest
+ currPos int
+
prev, next *writeQueue
}
-func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+func (q *writeQueue) empty() bool {
+ return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0
+}
func (q *writeQueue) push(wr FrameWriteRequest) {
- q.s = append(q.s, wr)
+ q.nextQueue = append(q.nextQueue, wr)
}
func (q *writeQueue) shift() FrameWriteRequest {
- if len(q.s) == 0 {
+ if q.empty() {
panic("invalid use of queue")
}
- wr := q.s[0]
- // TODO: less copy-happy queue.
- copy(q.s, q.s[1:])
- q.s[len(q.s)-1] = FrameWriteRequest{}
- q.s = q.s[:len(q.s)-1]
+ if q.currPos >= len(q.currQueue) {
+ q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0]
+ }
+ wr := q.currQueue[q.currPos]
+ q.currQueue[q.currPos] = FrameWriteRequest{}
+ q.currPos++
return wr
}
+func (q *writeQueue) peek() *FrameWriteRequest {
+ if q.currPos < len(q.currQueue) {
+ return &q.currQueue[q.currPos]
+ }
+ if len(q.nextQueue) > 0 {
+ return &q.nextQueue[0]
+ }
+ return nil
+}
+
// consume consumes up to n bytes from q.s[0]. If the frame is
// entirely consumed, it is removed from the queue. If the frame
// is partially consumed, the frame is kept with the consumed
// bytes removed. Returns true iff any bytes were consumed.
func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
- if len(q.s) == 0 {
+ if q.empty() {
return FrameWriteRequest{}, false
}
- consumed, rest, numresult := q.s[0].Consume(n)
+ consumed, rest, numresult := q.peek().Consume(n)
switch numresult {
case 0:
return FrameWriteRequest{}, false
case 1:
q.shift()
case 2:
- q.s[0] = rest
+ *q.peek() = rest
}
return consumed, true
}
@@ -230,10 +262,15 @@ type writeQueuePool []*writeQueue
// put inserts an unused writeQueue into the pool.
func (p *writeQueuePool) put(q *writeQueue) {
- for i := range q.s {
- q.s[i] = FrameWriteRequest{}
+ for i := range q.currQueue {
+ q.currQueue[i] = FrameWriteRequest{}
+ }
+ for i := range q.nextQueue {
+ q.nextQueue[i] = FrameWriteRequest{}
}
- q.s = q.s[:0]
+ q.currQueue = q.currQueue[:0]
+ q.nextQueue = q.nextQueue[:0]
+ q.currPos = 0
*p = append(*p, q)
}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
similarity index 77%
rename from vendor/golang.org/x/net/http2/writesched_priority.go
rename to vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
index f6783339d1..4e33c29a24 100644
--- a/vendor/golang.org/x/net/http2/writesched_priority.go
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
@@ -11,7 +11,7 @@ import (
)
// RFC 7540, Section 5.3.5: the default weight is 16.
-const priorityDefaultWeight = 15 // 16 = 15 + 1
+const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
type PriorityWriteSchedulerConfig struct {
@@ -66,8 +66,8 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
}
}
- ws := &priorityWriteScheduler{
- nodes: make(map[uint32]*priorityNode),
+ ws := &priorityWriteSchedulerRFC7540{
+ nodes: make(map[uint32]*priorityNodeRFC7540),
maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
@@ -81,32 +81,32 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
return ws
}
-type priorityNodeState int
+type priorityNodeStateRFC7540 int
const (
- priorityNodeOpen priorityNodeState = iota
- priorityNodeClosed
- priorityNodeIdle
+ priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota
+ priorityNodeClosedRFC7540
+ priorityNodeIdleRFC7540
)
-// priorityNode is a node in an HTTP/2 priority tree.
+// priorityNodeRFC7540 is a node in an HTTP/2 priority tree.
// Each node is associated with a single stream ID.
// See RFC 7540, Section 5.3.
-type priorityNode struct {
- q writeQueue // queue of pending frames to write
- id uint32 // id of the stream, or 0 for the root of the tree
- weight uint8 // the actual weight is weight+1, so the value is in [1,256]
- state priorityNodeState // open | closed | idle
- bytes int64 // number of bytes written by this node, or 0 if closed
- subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
+type priorityNodeRFC7540 struct {
+ q writeQueue // queue of pending frames to write
+ id uint32 // id of the stream, or 0 for the root of the tree
+ weight uint8 // the actual weight is weight+1, so the value is in [1,256]
+ state priorityNodeStateRFC7540 // open | closed | idle
+ bytes int64 // number of bytes written by this node, or 0 if closed
+ subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
// These links form the priority tree.
- parent *priorityNode
- kids *priorityNode // start of the kids list
- prev, next *priorityNode // doubly-linked list of siblings
+ parent *priorityNodeRFC7540
+ kids *priorityNodeRFC7540 // start of the kids list
+ prev, next *priorityNodeRFC7540 // doubly-linked list of siblings
}
-func (n *priorityNode) setParent(parent *priorityNode) {
+func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) {
if n == parent {
panic("setParent to self")
}
@@ -141,7 +141,7 @@ func (n *priorityNode) setParent(parent *priorityNode) {
}
}
-func (n *priorityNode) addBytes(b int64) {
+func (n *priorityNodeRFC7540) addBytes(b int64) {
n.bytes += b
for ; n != nil; n = n.parent {
n.subtreeBytes += b
@@ -154,7 +154,7 @@ func (n *priorityNode) addBytes(b int64) {
//
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
// if any ancestor p of n is still open (ignoring the root node).
-func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool {
if !n.q.empty() && f(n, openParent) {
return true
}
@@ -165,7 +165,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
// Don't consider the root "open" when updating openParent since
// we can't send data frames on the root stream (only control frames).
if n.id != 0 {
- openParent = openParent || (n.state == priorityNodeOpen)
+ openParent = openParent || (n.state == priorityNodeOpenRFC7540)
}
// Common case: only one kid or all kids have the same weight.
@@ -195,7 +195,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
*tmp = append(*tmp, n.kids)
n.kids.setParent(nil)
}
- sort.Sort(sortPriorityNodeSiblings(*tmp))
+ sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp))
for i := len(*tmp) - 1; i >= 0; i-- {
(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
}
@@ -207,15 +207,15 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
return false
}
-type sortPriorityNodeSiblings []*priorityNode
+type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540
-func (z sortPriorityNodeSiblings) Len() int { return len(z) }
-func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
-func (z sortPriorityNodeSiblings) Less(i, k int) bool {
+func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) }
+func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool {
// Prefer the subtree that has sent fewer bytes relative to its weight.
// See sections 5.3.2 and 5.3.4.
- wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
- wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
+ wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes)
+ wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes)
if bi == 0 && bk == 0 {
return wi >= wk
}
@@ -225,13 +225,13 @@ func (z sortPriorityNodeSiblings) Less(i, k int) bool {
return bi/bk <= wi/wk
}
-type priorityWriteScheduler struct {
+type priorityWriteSchedulerRFC7540 struct {
// root is the root of the priority tree, where root.id = 0.
// The root queues control frames that are not associated with any stream.
- root priorityNode
+ root priorityNodeRFC7540
// nodes maps stream ids to priority tree nodes.
- nodes map[uint32]*priorityNode
+ nodes map[uint32]*priorityNodeRFC7540
// maxID is the maximum stream id in nodes.
maxID uint32
@@ -239,7 +239,7 @@ type priorityWriteScheduler struct {
// lists of nodes that have been closed or are idle, but are kept in
// the tree for improved prioritization. When the lengths exceed either
// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
- closedNodes, idleNodes []*priorityNode
+ closedNodes, idleNodes []*priorityNodeRFC7540
// From the config.
maxClosedNodesInTree int
@@ -248,19 +248,19 @@ type priorityWriteScheduler struct {
enableWriteThrottle bool
// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
- tmp []*priorityNode
+ tmp []*priorityNodeRFC7540
// pool of empty queues for reuse.
queuePool writeQueuePool
}
-func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) {
// The stream may be currently idle but cannot be opened or closed.
if curr := ws.nodes[streamID]; curr != nil {
- if curr.state != priorityNodeIdle {
+ if curr.state != priorityNodeIdleRFC7540 {
panic(fmt.Sprintf("stream %d already opened", streamID))
}
- curr.state = priorityNodeOpen
+ curr.state = priorityNodeOpenRFC7540
return
}
@@ -272,11 +272,11 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
if parent == nil {
parent = &ws.root
}
- n := &priorityNode{
+ n := &priorityNodeRFC7540{
q: *ws.queuePool.get(),
id: streamID,
- weight: priorityDefaultWeight,
- state: priorityNodeOpen,
+ weight: priorityDefaultWeightRFC7540,
+ state: priorityNodeOpenRFC7540,
}
n.setParent(parent)
ws.nodes[streamID] = n
@@ -285,24 +285,23 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
}
}
-func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
+func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) {
if streamID == 0 {
panic("violation of WriteScheduler interface: cannot close stream 0")
}
if ws.nodes[streamID] == nil {
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
}
- if ws.nodes[streamID].state != priorityNodeOpen {
+ if ws.nodes[streamID].state != priorityNodeOpenRFC7540 {
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
}
n := ws.nodes[streamID]
- n.state = priorityNodeClosed
+ n.state = priorityNodeClosedRFC7540
n.addBytes(-n.bytes)
q := n.q
ws.queuePool.put(&q)
- n.q.s = nil
if ws.maxClosedNodesInTree > 0 {
ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
} else {
@@ -310,7 +309,7 @@ func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
}
}
-func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) {
if streamID == 0 {
panic("adjustPriority on root")
}
@@ -324,11 +323,11 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
return
}
ws.maxID = streamID
- n = &priorityNode{
+ n = &priorityNodeRFC7540{
q: *ws.queuePool.get(),
id: streamID,
- weight: priorityDefaultWeight,
- state: priorityNodeIdle,
+ weight: priorityDefaultWeightRFC7540,
+ state: priorityNodeIdleRFC7540,
}
n.setParent(&ws.root)
ws.nodes[streamID] = n
@@ -340,7 +339,7 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
parent := ws.nodes[priority.StreamDep]
if parent == nil {
n.setParent(&ws.root)
- n.weight = priorityDefaultWeight
+ n.weight = priorityDefaultWeightRFC7540
return
}
@@ -381,8 +380,8 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
n.weight = priority.Weight
}
-func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
- var n *priorityNode
+func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) {
+ var n *priorityNodeRFC7540
if wr.isControl() {
n = &ws.root
} else {
@@ -401,8 +400,8 @@ func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
n.q.push(wr)
}
-func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
- ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
+func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) {
+ ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool {
limit := int32(math.MaxInt32)
if openParent {
limit = ws.writeThrottleLimit
@@ -428,7 +427,7 @@ func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
return wr, ok
}
-func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
+func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) {
if maxSize == 0 {
return
}
@@ -442,7 +441,7 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max
*list = append(*list, n)
}
-func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
+func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) {
for n.kids != nil {
n.kids.setParent(n.parent)
}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
new file mode 100644
index 0000000000..cb4cadc32d
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
@@ -0,0 +1,209 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "math"
+)
+
+type streamMetadata struct {
+ location *writeQueue
+ priority PriorityParam
+}
+
+type priorityWriteSchedulerRFC9218 struct {
+ // control contains control frames (SETTINGS, PING, etc.).
+ control writeQueue
+
+ // heads contain the head of a circular list of streams.
+ // We put these heads within a nested array that represents urgency and
+ // incremental, as defined in
+ // https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters.
+ // 8 represents u=0 up to u=7, and 2 represents i=false and i=true.
+ heads [8][2]*writeQueue
+
+ // streams contains a mapping between each stream ID and their metadata, so
+ // we can quickly locate them when needing to, for example, adjust their
+ // priority.
+ streams map[uint32]streamMetadata
+
+ // queuePool are empty queues for reuse.
+ queuePool writeQueuePool
+
+ // prioritizeIncremental is used to determine whether we should prioritize
+ // incremental streams or not, when urgency is the same in a given Pop()
+ // call.
+ prioritizeIncremental bool
+}
+
+func newPriorityWriteSchedulerRFC9218() WriteScheduler {
+ ws := &priorityWriteSchedulerRFC9218{
+ streams: make(map[uint32]streamMetadata),
+ }
+ return ws
+}
+
+func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) {
+ if ws.streams[streamID].location != nil {
+ panic(fmt.Errorf("stream %d already opened", streamID))
+ }
+ q := ws.queuePool.get()
+ ws.streams[streamID] = streamMetadata{
+ location: q,
+ priority: opt.priority,
+ }
+
+ u, i := opt.priority.urgency, opt.priority.incremental
+ if ws.heads[u][i] == nil {
+ ws.heads[u][i] = q
+ q.next = q
+ q.prev = q
+ } else {
+ // Queues are stored in a ring.
+ // Insert the new stream before ws.head, putting it at the end of the list.
+ q.prev = ws.heads[u][i].prev
+ q.next = ws.heads[u][i]
+ q.prev.next = q
+ q.next.prev = q
+ }
+}
+
+func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) {
+ metadata := ws.streams[streamID]
+ q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+ if q == nil {
+ return
+ }
+ if q.next == q {
+ // This was the only open stream.
+ ws.heads[u][i] = nil
+ } else {
+ q.prev.next = q.next
+ q.next.prev = q.prev
+ if ws.heads[u][i] == q {
+ ws.heads[u][i] = q.next
+ }
+ }
+ delete(ws.streams, streamID)
+ ws.queuePool.put(q)
+}
+
+func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) {
+ metadata := ws.streams[streamID]
+ q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+ if q == nil {
+ return
+ }
+
+ // Remove stream from current location.
+ if q.next == q {
+ // This was the only open stream.
+ ws.heads[u][i] = nil
+ } else {
+ q.prev.next = q.next
+ q.next.prev = q.prev
+ if ws.heads[u][i] == q {
+ ws.heads[u][i] = q.next
+ }
+ }
+
+ // Insert stream to the new queue.
+ u, i = priority.urgency, priority.incremental
+ if ws.heads[u][i] == nil {
+ ws.heads[u][i] = q
+ q.next = q
+ q.prev = q
+ } else {
+ // Queues are stored in a ring.
+ // Insert the new stream before ws.head, putting it at the end of the list.
+ q.prev = ws.heads[u][i].prev
+ q.next = ws.heads[u][i]
+ q.prev.next = q
+ q.next.prev = q
+ }
+
+ // Update the metadata.
+ ws.streams[streamID] = streamMetadata{
+ location: q,
+ priority: priority,
+ }
+}
+
+func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) {
+ if wr.isControl() {
+ ws.control.push(wr)
+ return
+ }
+ q := ws.streams[wr.StreamID()].location
+ if q == nil {
+ // This is a closed stream.
+ // wr should not be a HEADERS or DATA frame.
+ // We push the request onto the control queue.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ ws.control.push(wr)
+ return
+ }
+ q.push(wr)
+}
+
+func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) {
+ // Control and RST_STREAM frames first.
+ if !ws.control.empty() {
+ return ws.control.shift(), true
+ }
+
+ // On the next Pop(), we want to prioritize incremental if we prioritized
+ // non-incremental request of the same urgency this time. Vice-versa.
+ // i.e. when there are incremental and non-incremental requests at the same
+ // priority, we give 50% of our bandwidth to the incremental ones in
+ // aggregate and 50% to the first non-incremental one (since
+ // non-incremental streams do not use round-robin writes).
+ ws.prioritizeIncremental = !ws.prioritizeIncremental
+
+ // Always prioritize lowest u (i.e. highest urgency level).
+ for u := range ws.heads {
+ for i := range ws.heads[u] {
+ // When we want to prioritize incremental, we try to pop i=true
+ // first before i=false when u is the same.
+ if ws.prioritizeIncremental {
+ i = (i + 1) % 2
+ }
+ q := ws.heads[u][i]
+ if q == nil {
+ continue
+ }
+ for {
+ if wr, ok := q.consume(math.MaxInt32); ok {
+ if i == 1 {
+ // For incremental streams, we update head to q.next so
+ // we can round-robin between multiple streams that can
+ // immediately benefit from partial writes.
+ ws.heads[u][i] = q.next
+ } else {
+ // For non-incremental streams, we try to finish one to
+ // completion rather than doing round-robin. However,
+ // we update head here so that if q.consume() is !ok
+ // (e.g. the stream has no more frame to consume), head
+ // is updated to the next q that has frames to consume
+ // on future iterations. This way, we do not prioritize
+ // writing to unavailable stream on next Pop() calls,
+ // preventing head-of-line blocking.
+ ws.heads[u][i] = q
+ }
+ return wr, true
+ }
+ q = q.next
+ if q == ws.heads[u][i] {
+ break
+ }
+ }
+
+ }
+ }
+ return FrameWriteRequest{}, false
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
index 54fe86322d..737cff9ecb 100644
--- a/vendor/golang.org/x/net/http2/writesched_roundrobin.go
+++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
@@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct {
}
// newRoundRobinWriteScheduler constructs a new write scheduler.
-// The round robin scheduler priorizes control frames
+// The round robin scheduler prioritizes control frames
// like SETTINGS and PING over DATA frames.
// When there are no control frames to send, it performs a round-robin
// selection from the ready streams.
diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go
index 4b70553179..1e10f89ebf 100644
--- a/vendor/golang.org/x/net/internal/httpcommon/request.go
+++ b/vendor/golang.org/x/net/internal/httpcommon/request.go
@@ -51,7 +51,7 @@ type EncodeHeadersParam struct {
DefaultUserAgent string
}
-// EncodeHeadersParam is the result of EncodeHeaders.
+// EncodeHeadersResult is the result of EncodeHeaders.
type EncodeHeadersResult struct {
HasBody bool
HasTrailers bool
@@ -399,7 +399,7 @@ type ServerRequestResult struct {
// If the request should be rejected, this is a short string suitable for passing
// to the http2 package's CountError function.
- // It might be a bit odd to return errors this way rather than returing an error,
+ // It might be a bit odd to return errors this way rather than returning an error,
// but this ensures we don't forget to include a CountError reason.
InvalidReason string
}
diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go
index 84fcc32b63..8eedb84cec 100644
--- a/vendor/golang.org/x/net/internal/socks/socks.go
+++ b/vendor/golang.org/x/net/internal/socks/socks.go
@@ -297,7 +297,7 @@ func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter,
b = append(b, up.Username...)
b = append(b, byte(len(up.Password)))
b = append(b, up.Password...)
- // TODO(mikio): handle IO deadlines and cancelation if
+ // TODO(mikio): handle IO deadlines and cancellation if
// necessary
if _, err := rw.Write(b); err != nil {
return err
diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go
index 047cb30eb1..7ab8b3cf13 100644
--- a/vendor/golang.org/x/net/publicsuffix/list.go
+++ b/vendor/golang.org/x/net/publicsuffix/list.go
@@ -51,6 +51,7 @@ package publicsuffix // import "golang.org/x/net/publicsuffix"
import (
"fmt"
"net/http/cookiejar"
+ "net/netip"
"strings"
)
@@ -84,6 +85,10 @@ func (list) String() string {
// domains like "foo.appspot.com" can be found at
// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases
func PublicSuffix(domain string) (publicSuffix string, icann bool) {
+ if _, err := netip.ParseAddr(domain); err == nil {
+ return domain, false
+ }
+
lo, hi := uint32(0), uint32(numTLD)
s, suffix, icannNode, wildcard := domain, len(domain), false, false
loop:
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index 1d8cffae8c..2f45dbc86e 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Package errgroup provides synchronization, error propagation, and Context
-// cancelation for groups of goroutines working on subtasks of a common task.
+// cancellation for groups of goroutines working on subtasks of a common task.
//
// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks
// returning errors.
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
index 63541994ef..34c9ae76ef 100644
--- a/vendor/golang.org/x/sys/cpu/cpu.go
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -92,6 +92,9 @@ var ARM64 struct {
HasSHA2 bool // SHA2 hardware implementation
HasCRC32 bool // CRC32 hardware implementation
HasATOMICS bool // Atomic memory operation instruction set
+ HasHPDS bool // Hierarchical permission disables in translations tables
+ HasLOR bool // Limited ordering regions
+ HasPAN bool // Privileged access never
HasFPHP bool // Half precision floating-point instruction set
HasASIMDHP bool // Advanced SIMD half precision instruction set
HasCPUID bool // CPUID identification scheme registers
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
index af2aa99f9f..f449c679fe 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
@@ -65,10 +65,10 @@ func setMinimalFeatures() {
func readARM64Registers() {
Initialized = true
- parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0())
+ parseARM64SystemRegisters(getisar0(), getisar1(), getmmfr1(), getpfr0())
}
-func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
+func parseARM64SystemRegisters(isar0, isar1, mmfr1, pfr0 uint64) {
// ID_AA64ISAR0_EL1
switch extractBits(isar0, 4, 7) {
case 1:
@@ -152,6 +152,22 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
ARM64.HasI8MM = true
}
+ // ID_AA64MMFR1_EL1
+ switch extractBits(mmfr1, 12, 15) {
+ case 1, 2:
+ ARM64.HasHPDS = true
+ }
+
+ switch extractBits(mmfr1, 16, 19) {
+ case 1:
+ ARM64.HasLOR = true
+ }
+
+ switch extractBits(mmfr1, 20, 23) {
+ case 1, 2, 3:
+ ARM64.HasPAN = true
+ }
+
// ID_AA64PFR0_EL1
switch extractBits(pfr0, 16, 19) {
case 0:
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s
index 22cc99844a..a4f24b3b0c 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s
+++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s
@@ -9,31 +9,34 @@
// func getisar0() uint64
TEXT ·getisar0(SB),NOSPLIT,$0-8
// get Instruction Set Attributes 0 into x0
- // mrs x0, ID_AA64ISAR0_EL1 = d5380600
- WORD $0xd5380600
+ MRS ID_AA64ISAR0_EL1, R0
MOVD R0, ret+0(FP)
RET
// func getisar1() uint64
TEXT ·getisar1(SB),NOSPLIT,$0-8
// get Instruction Set Attributes 1 into x0
- // mrs x0, ID_AA64ISAR1_EL1 = d5380620
- WORD $0xd5380620
+ MRS ID_AA64ISAR1_EL1, R0
+ MOVD R0, ret+0(FP)
+ RET
+
+// func getmmfr1() uint64
+TEXT ·getmmfr1(SB),NOSPLIT,$0-8
+ // get Memory Model Feature Register 1 into x0
+ MRS ID_AA64MMFR1_EL1, R0
MOVD R0, ret+0(FP)
RET
// func getpfr0() uint64
TEXT ·getpfr0(SB),NOSPLIT,$0-8
// get Processor Feature Register 0 into x0
- // mrs x0, ID_AA64PFR0_EL1 = d5380400
- WORD $0xd5380400
+ MRS ID_AA64PFR0_EL1, R0
MOVD R0, ret+0(FP)
RET
// func getzfr0() uint64
TEXT ·getzfr0(SB),NOSPLIT,$0-8
// get SVE Feature Register 0 into x0
- // mrs x0, ID_AA64ZFR0_EL1 = d5380480
- WORD $0xd5380480
+ MRS ID_AA64ZFR0_EL1, R0
MOVD R0, ret+0(FP)
RET
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
index 6ac6e1efb2..e3fc5a8d31 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
@@ -8,5 +8,6 @@ package cpu
func getisar0() uint64
func getisar1() uint64
+func getmmfr1() uint64
func getpfr0() uint64
func getzfr0() uint64
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
index 7f1946780b..8df2079e15 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
@@ -8,4 +8,5 @@ package cpu
func getisar0() uint64 { return 0 }
func getisar1() uint64 { return 0 }
+func getmmfr1() uint64 { return 0 }
func getpfr0() uint64 { return 0 }
diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go
index ebfb3fc8e7..19aea0633e 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go
@@ -167,7 +167,7 @@ func doinit() {
setMinimalFeatures()
return
}
- parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0)
+ parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64mmfr1, cpuid.aa64pfr0)
Initialized = true
}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
index 85b64d5ccb..87fd3a7780 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
@@ -59,7 +59,7 @@ func doinit() {
if !ok {
return
}
- parseARM64SystemRegisters(isar0, isar1, 0)
+ parseARM64SystemRegisters(isar0, isar1, 0, 0)
Initialized = true
}
diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go
deleted file mode 100644
index 73687de748..0000000000
--- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.5
-
-package plan9
-
-import "syscall"
-
-func fixwd() {
- syscall.Fixwd()
-}
-
-func Getwd() (wd string, err error) {
- return syscall.Getwd()
-}
-
-func Chdir(path string) error {
- return syscall.Chdir(path)
-}
diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go
index fb94582184..7a76489db1 100644
--- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go
+++ b/vendor/golang.org/x/sys/plan9/pwd_plan9.go
@@ -2,22 +2,18 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !go1.5
-
package plan9
+import "syscall"
+
func fixwd() {
+ syscall.Fixwd()
}
func Getwd() (wd string, err error) {
- fd, err := open(".", O_RDONLY)
- if err != nil {
- return "", err
- }
- defer Close(fd)
- return Fd2path(fd)
+ return syscall.Getwd()
}
func Chdir(path string) error {
- return chdir(path)
+ return syscall.Chdir(path)
}
diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go
index 6e5c81acd0..3ea470387b 100644
--- a/vendor/golang.org/x/sys/unix/affinity_linux.go
+++ b/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -38,8 +38,15 @@ func SchedSetaffinity(pid int, set *CPUSet) error {
// Zero clears the set s, so that it contains no CPUs.
func (s *CPUSet) Zero() {
+ clear(s[:])
+}
+
+// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity]
+// will silently ignore any invalid CPU bits in [CPUSet] so this is an
+// efficient way of resetting the CPU affinity of a process.
+func (s *CPUSet) Fill() {
for i := range s {
- s[i] = 0
+ s[i] = ^cpuMask(0)
}
}
diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go
index 9e83d18cd0..62ed12645f 100644
--- a/vendor/golang.org/x/sys/unix/fdset.go
+++ b/vendor/golang.org/x/sys/unix/fdset.go
@@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool {
// Zero clears the set fds.
func (fds *FdSet) Zero() {
- for i := range fds.Bits {
- fds.Bits[i] = 0
- }
+ clear(fds.Bits[:])
}
diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go
index 848840ae4c..309f5a2b0c 100644
--- a/vendor/golang.org/x/sys/unix/ifreq_linux.go
+++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go
@@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) {
// clear zeroes the ifreq's union field to prevent trailing garbage data from
// being sent to the kernel if an ifreq is reused.
func (ifr *Ifreq) clear() {
- for i := range ifr.raw.Ifru {
- ifr.raw.Ifru[i] = 0
- }
+ clear(ifr.raw.Ifru[:])
}
// TODO(mdlayher): export as IfreqData? For now we can provide helpers such as
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
index e6f31d374d..d0ed611912 100644
--- a/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -49,6 +49,7 @@ esac
if [[ "$GOOS" = "linux" ]]; then
# Use the Docker-based build system
# Files generated through docker (use $cmd so you can Ctl-C the build or run)
+ set -e
$cmd docker build --tag generate:$GOOS $GOOS
$cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
exit
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index 6ab02b6c31..42517077c4 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -226,6 +226,7 @@ struct ltchars {
#include
#include
#include
+#include
#include
#include
#include
@@ -349,6 +350,9 @@ struct ltchars {
#define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN)
#define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN)
+// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info")
+#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME
+#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION
'
includes_NetBSD='
@@ -526,6 +530,7 @@ ccflags="$@"
$2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ ||
+ $2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ ||
$2 ~ /^O?XTABS$/ ||
$2 ~ /^TC[IO](ON|OFF)$/ ||
$2 ~ /^IN_/ ||
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 798f61ad3b..7838ca5db2 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -602,14 +602,9 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI
return
}
-// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
const minIovec = 8
func Readv(fd int, iovs [][]byte) (n int, err error) {
- if !darwinKernelVersionMin(11, 0, 0) {
- return 0, ENOSYS
- }
-
iovecs := make([]Iovec, 0, minIovec)
iovecs = appendBytes(iovecs, iovs)
n, err = readv(fd, iovecs)
@@ -618,9 +613,6 @@ func Readv(fd int, iovs [][]byte) (n int, err error) {
}
func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
- if !darwinKernelVersionMin(11, 0, 0) {
- return 0, ENOSYS
- }
iovecs := make([]Iovec, 0, minIovec)
iovecs = appendBytes(iovecs, iovs)
n, err = preadv(fd, iovecs, offset)
@@ -629,10 +621,6 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
}
func Writev(fd int, iovs [][]byte) (n int, err error) {
- if !darwinKernelVersionMin(11, 0, 0) {
- return 0, ENOSYS
- }
-
iovecs := make([]Iovec, 0, minIovec)
iovecs = appendBytes(iovecs, iovs)
if raceenabled {
@@ -644,10 +632,6 @@ func Writev(fd int, iovs [][]byte) (n int, err error) {
}
func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) {
- if !darwinKernelVersionMin(11, 0, 0) {
- return 0, ENOSYS
- }
-
iovecs := make([]Iovec, 0, minIovec)
iovecs = appendBytes(iovecs, iovs)
if raceenabled {
@@ -707,45 +691,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) {
}
}
-func darwinMajorMinPatch() (maj, min, patch int, err error) {
- var un Utsname
- err = Uname(&un)
- if err != nil {
- return
- }
-
- var mmp [3]int
- c := 0
-Loop:
- for _, b := range un.Release[:] {
- switch {
- case b >= '0' && b <= '9':
- mmp[c] = 10*mmp[c] + int(b-'0')
- case b == '.':
- c++
- if c > 2 {
- return 0, 0, 0, ENOTSUP
- }
- case b == 0:
- break Loop
- default:
- return 0, 0, 0, ENOTSUP
- }
- }
- if c != 2 {
- return 0, 0, 0, ENOTSUP
- }
- return mmp[0], mmp[1], mmp[2], nil
-}
-
-func darwinKernelVersionMin(maj, min, patch int) bool {
- actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch()
- if err != nil {
- return false
- }
- return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch)
-}
-
+//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 4958a65708..06c0eea6fb 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -801,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) {
// one. The kernel expects SID to be in network byte order.
binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID)
copy(sa.raw[8:14], sa.Remote)
- for i := 14; i < 14+IFNAMSIZ; i++ {
- sa.raw[i] = 0
- }
+ clear(sa.raw[14 : 14+IFNAMSIZ])
copy(sa.raw[14:], sa.Dev)
return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil
}
@@ -2645,3 +2643,9 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) {
//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error)
//sys Mseal(b []byte, flags uint) (err error)
+
+//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY
+
+func SetMemPolicy(mode int, mask *CPUSet) error {
+ return setMemPolicy(mode, mask, _CPU_SETSIZE)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
index 88162099af..34a4676973 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) {
return Statvfs1(path, buf, ST_WAIT)
}
+func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) {
+ var (
+ _p0 unsafe.Pointer
+ bufsize uintptr
+ )
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf))
+ }
+ r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
/*
* Exposed directly
*/
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index abc3955477..18a3d9bdab 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -629,7 +629,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys Kill(pid int, signum syscall.Signal) (err error)
//sys Lchown(path string, uid int, gid int) (err error)
//sys Link(path string, link string) (err error)
-//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten
+//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_listen
//sys Lstat(path string, stat *Stat_t) (err error)
//sys Madvise(b []byte, advice int) (err error)
//sys Mkdir(path string, mode uint32) (err error)
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 9e7a6c5a4f..d0a75da572 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -328,6 +328,8 @@ const (
AUDIT_KERNEL = 0x7d0
AUDIT_KERNEL_OTHER = 0x524
AUDIT_KERN_MODULE = 0x532
+ AUDIT_LANDLOCK_ACCESS = 0x58f
+ AUDIT_LANDLOCK_DOMAIN = 0x590
AUDIT_LAST_FEATURE = 0x1
AUDIT_LAST_KERN_ANOM_MSG = 0x707
AUDIT_LAST_USER_MSG = 0x4af
@@ -492,6 +494,7 @@ const (
BPF_F_BEFORE = 0x8
BPF_F_ID = 0x20
BPF_F_NETFILTER_IP_DEFRAG = 0x1
+ BPF_F_PREORDER = 0x40
BPF_F_QUERY_EFFECTIVE = 0x1
BPF_F_REDIRECT_FLAGS = 0x19
BPF_F_REPLACE = 0x4
@@ -528,6 +531,7 @@ const (
BPF_LDX = 0x1
BPF_LEN = 0x80
BPF_LL_OFF = -0x200000
+ BPF_LOAD_ACQ = 0x100
BPF_LSH = 0x60
BPF_MAJOR_VERSION = 0x1
BPF_MAXINSNS = 0x1000
@@ -555,6 +559,7 @@ const (
BPF_RET = 0x6
BPF_RSH = 0x70
BPF_ST = 0x2
+ BPF_STORE_REL = 0x110
BPF_STX = 0x3
BPF_SUB = 0x10
BPF_TAG_SIZE = 0x8
@@ -844,24 +849,90 @@ const (
DM_UUID_FLAG = 0x4000
DM_UUID_LEN = 0x81
DM_VERSION = 0xc138fd00
- DM_VERSION_EXTRA = "-ioctl (2025-01-17)"
+ DM_VERSION_EXTRA = "-ioctl (2025-04-28)"
DM_VERSION_MAJOR = 0x4
- DM_VERSION_MINOR = 0x31
+ DM_VERSION_MINOR = 0x32
DM_VERSION_PATCHLEVEL = 0x0
+ DT_ADDRRNGHI = 0x6ffffeff
+ DT_ADDRRNGLO = 0x6ffffe00
DT_BLK = 0x6
DT_CHR = 0x2
+ DT_DEBUG = 0x15
DT_DIR = 0x4
+ DT_ENCODING = 0x20
DT_FIFO = 0x1
+ DT_FINI = 0xd
+ DT_FLAGS_1 = 0x6ffffffb
+ DT_GNU_HASH = 0x6ffffef5
+ DT_HASH = 0x4
+ DT_HIOS = 0x6ffff000
+ DT_HIPROC = 0x7fffffff
+ DT_INIT = 0xc
+ DT_JMPREL = 0x17
DT_LNK = 0xa
+ DT_LOOS = 0x6000000d
+ DT_LOPROC = 0x70000000
+ DT_NEEDED = 0x1
+ DT_NULL = 0x0
+ DT_PLTGOT = 0x3
+ DT_PLTREL = 0x14
+ DT_PLTRELSZ = 0x2
DT_REG = 0x8
+ DT_REL = 0x11
+ DT_RELA = 0x7
+ DT_RELACOUNT = 0x6ffffff9
+ DT_RELAENT = 0x9
+ DT_RELASZ = 0x8
+ DT_RELCOUNT = 0x6ffffffa
+ DT_RELENT = 0x13
+ DT_RELSZ = 0x12
+ DT_RPATH = 0xf
DT_SOCK = 0xc
+ DT_SONAME = 0xe
+ DT_STRSZ = 0xa
+ DT_STRTAB = 0x5
+ DT_SYMBOLIC = 0x10
+ DT_SYMENT = 0xb
+ DT_SYMTAB = 0x6
+ DT_TEXTREL = 0x16
DT_UNKNOWN = 0x0
+ DT_VALRNGHI = 0x6ffffdff
+ DT_VALRNGLO = 0x6ffffd00
+ DT_VERDEF = 0x6ffffffc
+ DT_VERDEFNUM = 0x6ffffffd
+ DT_VERNEED = 0x6ffffffe
+ DT_VERNEEDNUM = 0x6fffffff
+ DT_VERSYM = 0x6ffffff0
DT_WHT = 0xe
ECHO = 0x8
ECRYPTFS_SUPER_MAGIC = 0xf15f
EFD_SEMAPHORE = 0x1
EFIVARFS_MAGIC = 0xde5e81e4
EFS_SUPER_MAGIC = 0x414a53
+ EI_CLASS = 0x4
+ EI_DATA = 0x5
+ EI_MAG0 = 0x0
+ EI_MAG1 = 0x1
+ EI_MAG2 = 0x2
+ EI_MAG3 = 0x3
+ EI_NIDENT = 0x10
+ EI_OSABI = 0x7
+ EI_PAD = 0x8
+ EI_VERSION = 0x6
+ ELFCLASS32 = 0x1
+ ELFCLASS64 = 0x2
+ ELFCLASSNONE = 0x0
+ ELFCLASSNUM = 0x3
+ ELFDATA2LSB = 0x1
+ ELFDATA2MSB = 0x2
+ ELFDATANONE = 0x0
+ ELFMAG = "\177ELF"
+ ELFMAG0 = 0x7f
+ ELFMAG1 = 'E'
+ ELFMAG2 = 'L'
+ ELFMAG3 = 'F'
+ ELFOSABI_LINUX = 0x3
+ ELFOSABI_NONE = 0x0
EM_386 = 0x3
EM_486 = 0x6
EM_68K = 0x4
@@ -937,9 +1008,6 @@ const (
EPOLL_CTL_MOD = 0x3
EPOLL_IOC_TYPE = 0x8a
EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2
- ESP_V4_FLOW = 0xa
- ESP_V6_FLOW = 0xc
- ETHER_FLOW = 0x12
ETHTOOL_BUSINFO_LEN = 0x20
ETHTOOL_EROMVERS_LEN = 0x20
ETHTOOL_FAMILY_NAME = "ethtool"
@@ -1150,14 +1218,24 @@ const (
ETH_P_WCCP = 0x883e
ETH_P_X25 = 0x805
ETH_P_XDSA = 0xf8
+ ET_CORE = 0x4
+ ET_DYN = 0x3
+ ET_EXEC = 0x2
+ ET_HIPROC = 0xffff
+ ET_LOPROC = 0xff00
+ ET_NONE = 0x0
+ ET_REL = 0x1
EV_ABS = 0x3
EV_CNT = 0x20
+ EV_CURRENT = 0x1
EV_FF = 0x15
EV_FF_STATUS = 0x17
EV_KEY = 0x1
EV_LED = 0x11
EV_MAX = 0x1f
EV_MSC = 0x4
+ EV_NONE = 0x0
+ EV_NUM = 0x2
EV_PWR = 0x16
EV_REL = 0x2
EV_REP = 0x14
@@ -1213,6 +1291,7 @@ const (
FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2
FAN_EVENT_INFO_TYPE_ERROR = 0x5
FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_INFO_TYPE_MNT = 0x7
FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc
FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa
FAN_EVENT_INFO_TYPE_PIDFD = 0x4
@@ -1231,9 +1310,12 @@ const (
FAN_MARK_IGNORED_SURV_MODIFY = 0x40
FAN_MARK_IGNORE_SURV = 0x440
FAN_MARK_INODE = 0x0
+ FAN_MARK_MNTNS = 0x110
FAN_MARK_MOUNT = 0x10
FAN_MARK_ONLYDIR = 0x8
FAN_MARK_REMOVE = 0x2
+ FAN_MNT_ATTACH = 0x1000000
+ FAN_MNT_DETACH = 0x2000000
FAN_MODIFY = 0x2
FAN_MOVE = 0xc0
FAN_MOVED_FROM = 0x40
@@ -1255,6 +1337,7 @@ const (
FAN_REPORT_DIR_FID = 0x400
FAN_REPORT_FD_ERROR = 0x2000
FAN_REPORT_FID = 0x200
+ FAN_REPORT_MNT = 0x4000
FAN_REPORT_NAME = 0x800
FAN_REPORT_PIDFD = 0x80
FAN_REPORT_TARGET_FID = 0x1000
@@ -1274,6 +1357,7 @@ const (
FIB_RULE_PERMANENT = 0x1
FIB_RULE_UNRESOLVED = 0x4
FIDEDUPERANGE = 0xc0189436
+ FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1
FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8
FSCRYPT_KEY_DESC_PREFIX = "fscrypt:"
FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8
@@ -1582,7 +1666,6 @@ const (
IPV6_DONTFRAG = 0x3e
IPV6_DROP_MEMBERSHIP = 0x15
IPV6_DSTOPTS = 0x3b
- IPV6_FLOW = 0x11
IPV6_FREEBIND = 0x4e
IPV6_HDRINCL = 0x24
IPV6_HOPLIMIT = 0x34
@@ -1633,7 +1716,6 @@ const (
IPV6_TRANSPARENT = 0x4b
IPV6_UNICAST_HOPS = 0x10
IPV6_UNICAST_IF = 0x4c
- IPV6_USER_FLOW = 0xe
IPV6_V6ONLY = 0x1a
IPV6_VERSION = 0x60
IPV6_VERSION_MASK = 0xf0
@@ -1695,7 +1777,6 @@ const (
IP_TTL = 0x2
IP_UNBLOCK_SOURCE = 0x25
IP_UNICAST_IF = 0x32
- IP_USER_FLOW = 0xd
IP_XFRM_POLICY = 0x11
ISOFS_SUPER_MAGIC = 0x9660
ISTRIP = 0x20
@@ -1817,7 +1898,11 @@ const (
LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2
LANDLOCK_ACCESS_NET_BIND_TCP = 0x1
LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2
+ LANDLOCK_CREATE_RULESET_ERRATA = 0x2
LANDLOCK_CREATE_RULESET_VERSION = 0x1
+ LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2
+ LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1
+ LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4
LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1
LANDLOCK_SCOPE_SIGNAL = 0x2
LINUX_REBOOT_CMD_CAD_OFF = 0x0
@@ -2267,7 +2352,167 @@ const (
NLM_F_REPLACE = 0x100
NLM_F_REQUEST = 0x1
NLM_F_ROOT = 0x100
+ NN_386_IOPERM = "LINUX"
+ NN_386_TLS = "LINUX"
+ NN_ARC_V2 = "LINUX"
+ NN_ARM_FPMR = "LINUX"
+ NN_ARM_GCS = "LINUX"
+ NN_ARM_HW_BREAK = "LINUX"
+ NN_ARM_HW_WATCH = "LINUX"
+ NN_ARM_PACA_KEYS = "LINUX"
+ NN_ARM_PACG_KEYS = "LINUX"
+ NN_ARM_PAC_ENABLED_KEYS = "LINUX"
+ NN_ARM_PAC_MASK = "LINUX"
+ NN_ARM_POE = "LINUX"
+ NN_ARM_SSVE = "LINUX"
+ NN_ARM_SVE = "LINUX"
+ NN_ARM_SYSTEM_CALL = "LINUX"
+ NN_ARM_TAGGED_ADDR_CTRL = "LINUX"
+ NN_ARM_TLS = "LINUX"
+ NN_ARM_VFP = "LINUX"
+ NN_ARM_ZA = "LINUX"
+ NN_ARM_ZT = "LINUX"
+ NN_AUXV = "CORE"
+ NN_FILE = "CORE"
+ NN_GNU_PROPERTY_TYPE_0 = "GNU"
+ NN_LOONGARCH_CPUCFG = "LINUX"
+ NN_LOONGARCH_CSR = "LINUX"
+ NN_LOONGARCH_HW_BREAK = "LINUX"
+ NN_LOONGARCH_HW_WATCH = "LINUX"
+ NN_LOONGARCH_LASX = "LINUX"
+ NN_LOONGARCH_LBT = "LINUX"
+ NN_LOONGARCH_LSX = "LINUX"
+ NN_MIPS_DSP = "LINUX"
+ NN_MIPS_FP_MODE = "LINUX"
+ NN_MIPS_MSA = "LINUX"
+ NN_PPC_DEXCR = "LINUX"
+ NN_PPC_DSCR = "LINUX"
+ NN_PPC_EBB = "LINUX"
+ NN_PPC_HASHKEYR = "LINUX"
+ NN_PPC_PKEY = "LINUX"
+ NN_PPC_PMU = "LINUX"
+ NN_PPC_PPR = "LINUX"
+ NN_PPC_SPE = "LINUX"
+ NN_PPC_TAR = "LINUX"
+ NN_PPC_TM_CDSCR = "LINUX"
+ NN_PPC_TM_CFPR = "LINUX"
+ NN_PPC_TM_CGPR = "LINUX"
+ NN_PPC_TM_CPPR = "LINUX"
+ NN_PPC_TM_CTAR = "LINUX"
+ NN_PPC_TM_CVMX = "LINUX"
+ NN_PPC_TM_CVSX = "LINUX"
+ NN_PPC_TM_SPR = "LINUX"
+ NN_PPC_VMX = "LINUX"
+ NN_PPC_VSX = "LINUX"
+ NN_PRFPREG = "CORE"
+ NN_PRPSINFO = "CORE"
+ NN_PRSTATUS = "CORE"
+ NN_PRXFPREG = "LINUX"
+ NN_RISCV_CSR = "LINUX"
+ NN_RISCV_TAGGED_ADDR_CTRL = "LINUX"
+ NN_RISCV_VECTOR = "LINUX"
+ NN_S390_CTRS = "LINUX"
+ NN_S390_GS_BC = "LINUX"
+ NN_S390_GS_CB = "LINUX"
+ NN_S390_HIGH_GPRS = "LINUX"
+ NN_S390_LAST_BREAK = "LINUX"
+ NN_S390_PREFIX = "LINUX"
+ NN_S390_PV_CPU_DATA = "LINUX"
+ NN_S390_RI_CB = "LINUX"
+ NN_S390_SYSTEM_CALL = "LINUX"
+ NN_S390_TDB = "LINUX"
+ NN_S390_TIMER = "LINUX"
+ NN_S390_TODCMP = "LINUX"
+ NN_S390_TODPREG = "LINUX"
+ NN_S390_VXRS_HIGH = "LINUX"
+ NN_S390_VXRS_LOW = "LINUX"
+ NN_SIGINFO = "CORE"
+ NN_TASKSTRUCT = "CORE"
+ NN_VMCOREDD = "LINUX"
+ NN_X86_SHSTK = "LINUX"
+ NN_X86_XSAVE_LAYOUT = "LINUX"
+ NN_X86_XSTATE = "LINUX"
NSFS_MAGIC = 0x6e736673
+ NT_386_IOPERM = 0x201
+ NT_386_TLS = 0x200
+ NT_ARC_V2 = 0x600
+ NT_ARM_FPMR = 0x40e
+ NT_ARM_GCS = 0x410
+ NT_ARM_HW_BREAK = 0x402
+ NT_ARM_HW_WATCH = 0x403
+ NT_ARM_PACA_KEYS = 0x407
+ NT_ARM_PACG_KEYS = 0x408
+ NT_ARM_PAC_ENABLED_KEYS = 0x40a
+ NT_ARM_PAC_MASK = 0x406
+ NT_ARM_POE = 0x40f
+ NT_ARM_SSVE = 0x40b
+ NT_ARM_SVE = 0x405
+ NT_ARM_SYSTEM_CALL = 0x404
+ NT_ARM_TAGGED_ADDR_CTRL = 0x409
+ NT_ARM_TLS = 0x401
+ NT_ARM_VFP = 0x400
+ NT_ARM_ZA = 0x40c
+ NT_ARM_ZT = 0x40d
+ NT_AUXV = 0x6
+ NT_FILE = 0x46494c45
+ NT_GNU_PROPERTY_TYPE_0 = 0x5
+ NT_LOONGARCH_CPUCFG = 0xa00
+ NT_LOONGARCH_CSR = 0xa01
+ NT_LOONGARCH_HW_BREAK = 0xa05
+ NT_LOONGARCH_HW_WATCH = 0xa06
+ NT_LOONGARCH_LASX = 0xa03
+ NT_LOONGARCH_LBT = 0xa04
+ NT_LOONGARCH_LSX = 0xa02
+ NT_MIPS_DSP = 0x800
+ NT_MIPS_FP_MODE = 0x801
+ NT_MIPS_MSA = 0x802
+ NT_PPC_DEXCR = 0x111
+ NT_PPC_DSCR = 0x105
+ NT_PPC_EBB = 0x106
+ NT_PPC_HASHKEYR = 0x112
+ NT_PPC_PKEY = 0x110
+ NT_PPC_PMU = 0x107
+ NT_PPC_PPR = 0x104
+ NT_PPC_SPE = 0x101
+ NT_PPC_TAR = 0x103
+ NT_PPC_TM_CDSCR = 0x10f
+ NT_PPC_TM_CFPR = 0x109
+ NT_PPC_TM_CGPR = 0x108
+ NT_PPC_TM_CPPR = 0x10e
+ NT_PPC_TM_CTAR = 0x10d
+ NT_PPC_TM_CVMX = 0x10a
+ NT_PPC_TM_CVSX = 0x10b
+ NT_PPC_TM_SPR = 0x10c
+ NT_PPC_VMX = 0x100
+ NT_PPC_VSX = 0x102
+ NT_PRFPREG = 0x2
+ NT_PRPSINFO = 0x3
+ NT_PRSTATUS = 0x1
+ NT_PRXFPREG = 0x46e62b7f
+ NT_RISCV_CSR = 0x900
+ NT_RISCV_TAGGED_ADDR_CTRL = 0x902
+ NT_RISCV_VECTOR = 0x901
+ NT_S390_CTRS = 0x304
+ NT_S390_GS_BC = 0x30c
+ NT_S390_GS_CB = 0x30b
+ NT_S390_HIGH_GPRS = 0x300
+ NT_S390_LAST_BREAK = 0x306
+ NT_S390_PREFIX = 0x305
+ NT_S390_PV_CPU_DATA = 0x30e
+ NT_S390_RI_CB = 0x30d
+ NT_S390_SYSTEM_CALL = 0x307
+ NT_S390_TDB = 0x308
+ NT_S390_TIMER = 0x301
+ NT_S390_TODCMP = 0x302
+ NT_S390_TODPREG = 0x303
+ NT_S390_VXRS_HIGH = 0x30a
+ NT_S390_VXRS_LOW = 0x309
+ NT_SIGINFO = 0x53494749
+ NT_TASKSTRUCT = 0x4
+ NT_VMCOREDD = 0x700
+ NT_X86_SHSTK = 0x204
+ NT_X86_XSAVE_LAYOUT = 0x205
+ NT_X86_XSTATE = 0x202
OCFS2_SUPER_MAGIC = 0x7461636f
OCRNL = 0x8
OFDEL = 0x80
@@ -2454,6 +2699,59 @@ const (
PERF_RECORD_MISC_USER = 0x2
PERF_SAMPLE_BRANCH_PLM_ALL = 0x7
PERF_SAMPLE_WEIGHT_TYPE = 0x1004000
+ PF_ALG = 0x26
+ PF_APPLETALK = 0x5
+ PF_ASH = 0x12
+ PF_ATMPVC = 0x8
+ PF_ATMSVC = 0x14
+ PF_AX25 = 0x3
+ PF_BLUETOOTH = 0x1f
+ PF_BRIDGE = 0x7
+ PF_CAIF = 0x25
+ PF_CAN = 0x1d
+ PF_DECnet = 0xc
+ PF_ECONET = 0x13
+ PF_FILE = 0x1
+ PF_IB = 0x1b
+ PF_IEEE802154 = 0x24
+ PF_INET = 0x2
+ PF_INET6 = 0xa
+ PF_IPX = 0x4
+ PF_IRDA = 0x17
+ PF_ISDN = 0x22
+ PF_IUCV = 0x20
+ PF_KCM = 0x29
+ PF_KEY = 0xf
+ PF_LLC = 0x1a
+ PF_LOCAL = 0x1
+ PF_MAX = 0x2e
+ PF_MCTP = 0x2d
+ PF_MPLS = 0x1c
+ PF_NETBEUI = 0xd
+ PF_NETLINK = 0x10
+ PF_NETROM = 0x6
+ PF_NFC = 0x27
+ PF_PACKET = 0x11
+ PF_PHONET = 0x23
+ PF_PPPOX = 0x18
+ PF_QIPCRTR = 0x2a
+ PF_R = 0x4
+ PF_RDS = 0x15
+ PF_ROSE = 0xb
+ PF_ROUTE = 0x10
+ PF_RXRPC = 0x21
+ PF_SECURITY = 0xe
+ PF_SMC = 0x2b
+ PF_SNA = 0x16
+ PF_TIPC = 0x1e
+ PF_UNIX = 0x1
+ PF_UNSPEC = 0x0
+ PF_VSOCK = 0x28
+ PF_W = 0x2
+ PF_WANPIPE = 0x19
+ PF_X = 0x1
+ PF_X25 = 0x9
+ PF_XDP = 0x2c
PID_FS_MAGIC = 0x50494446
PIPEFS_MAGIC = 0x50495045
PPPIOCGNPMODE = 0xc008744c
@@ -2493,6 +2791,10 @@ const (
PR_FP_EXC_UND = 0x40000
PR_FP_MODE_FR = 0x1
PR_FP_MODE_FRE = 0x2
+ PR_FUTEX_HASH = 0x4e
+ PR_FUTEX_HASH_GET_IMMUTABLE = 0x3
+ PR_FUTEX_HASH_GET_SLOTS = 0x2
+ PR_FUTEX_HASH_SET_SLOTS = 0x1
PR_GET_AUXV = 0x41555856
PR_GET_CHILD_SUBREAPER = 0x25
PR_GET_DUMPABLE = 0x3
@@ -2652,6 +2954,10 @@ const (
PR_TAGGED_ADDR_ENABLE = 0x1
PR_TASK_PERF_EVENTS_DISABLE = 0x1f
PR_TASK_PERF_EVENTS_ENABLE = 0x20
+ PR_TIMER_CREATE_RESTORE_IDS = 0x4d
+ PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2
+ PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0
+ PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1
PR_TIMING_STATISTICAL = 0x0
PR_TIMING_TIMESTAMP = 0x1
PR_TSC_ENABLE = 0x1
@@ -2732,6 +3038,7 @@ const (
PTRACE_SETREGSET = 0x4205
PTRACE_SETSIGINFO = 0x4203
PTRACE_SETSIGMASK = 0x420b
+ PTRACE_SET_SYSCALL_INFO = 0x4212
PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210
PTRACE_SINGLESTEP = 0x9
PTRACE_SYSCALL = 0x18
@@ -2740,6 +3047,23 @@ const (
PTRACE_SYSCALL_INFO_NONE = 0x0
PTRACE_SYSCALL_INFO_SECCOMP = 0x3
PTRACE_TRACEME = 0x0
+ PT_AARCH64_MEMTAG_MTE = 0x70000002
+ PT_DYNAMIC = 0x2
+ PT_GNU_EH_FRAME = 0x6474e550
+ PT_GNU_PROPERTY = 0x6474e553
+ PT_GNU_RELRO = 0x6474e552
+ PT_GNU_STACK = 0x6474e551
+ PT_HIOS = 0x6fffffff
+ PT_HIPROC = 0x7fffffff
+ PT_INTERP = 0x3
+ PT_LOAD = 0x1
+ PT_LOOS = 0x60000000
+ PT_LOPROC = 0x70000000
+ PT_NOTE = 0x4
+ PT_NULL = 0x0
+ PT_PHDR = 0x6
+ PT_SHLIB = 0x5
+ PT_TLS = 0x7
P_ALL = 0x0
P_PGID = 0x2
P_PID = 0x1
@@ -2982,6 +3306,7 @@ const (
RTPROT_NTK = 0xf
RTPROT_OPENR = 0x63
RTPROT_OSPF = 0xbc
+ RTPROT_OVN = 0x54
RTPROT_RA = 0x9
RTPROT_REDIRECT = 0x1
RTPROT_RIP = 0xbd
@@ -3072,6 +3397,47 @@ const (
SEEK_MAX = 0x4
SEEK_SET = 0x0
SELINUX_MAGIC = 0xf97cff8c
+ SHF_ALLOC = 0x2
+ SHF_EXCLUDE = 0x8000000
+ SHF_EXECINSTR = 0x4
+ SHF_GROUP = 0x200
+ SHF_INFO_LINK = 0x40
+ SHF_LINK_ORDER = 0x80
+ SHF_MASKOS = 0xff00000
+ SHF_MASKPROC = 0xf0000000
+ SHF_MERGE = 0x10
+ SHF_ORDERED = 0x4000000
+ SHF_OS_NONCONFORMING = 0x100
+ SHF_RELA_LIVEPATCH = 0x100000
+ SHF_RO_AFTER_INIT = 0x200000
+ SHF_STRINGS = 0x20
+ SHF_TLS = 0x400
+ SHF_WRITE = 0x1
+ SHN_ABS = 0xfff1
+ SHN_COMMON = 0xfff2
+ SHN_HIPROC = 0xff1f
+ SHN_HIRESERVE = 0xffff
+ SHN_LIVEPATCH = 0xff20
+ SHN_LOPROC = 0xff00
+ SHN_LORESERVE = 0xff00
+ SHN_UNDEF = 0x0
+ SHT_DYNAMIC = 0x6
+ SHT_DYNSYM = 0xb
+ SHT_HASH = 0x5
+ SHT_HIPROC = 0x7fffffff
+ SHT_HIUSER = 0xffffffff
+ SHT_LOPROC = 0x70000000
+ SHT_LOUSER = 0x80000000
+ SHT_NOBITS = 0x8
+ SHT_NOTE = 0x7
+ SHT_NULL = 0x0
+ SHT_NUM = 0xc
+ SHT_PROGBITS = 0x1
+ SHT_REL = 0x9
+ SHT_RELA = 0x4
+ SHT_SHLIB = 0xa
+ SHT_STRTAB = 0x3
+ SHT_SYMTAB = 0x2
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -3298,6 +3664,16 @@ const (
STATX_UID = 0x8
STATX_WRITE_ATOMIC = 0x10000
STATX__RESERVED = 0x80000000
+ STB_GLOBAL = 0x1
+ STB_LOCAL = 0x0
+ STB_WEAK = 0x2
+ STT_COMMON = 0x5
+ STT_FILE = 0x4
+ STT_FUNC = 0x2
+ STT_NOTYPE = 0x0
+ STT_OBJECT = 0x1
+ STT_SECTION = 0x3
+ STT_TLS = 0x6
SYNC_FILE_RANGE_WAIT_AFTER = 0x4
SYNC_FILE_RANGE_WAIT_BEFORE = 0x1
SYNC_FILE_RANGE_WRITE = 0x2
@@ -3336,7 +3712,7 @@ const (
TASKSTATS_GENL_NAME = "TASKSTATS"
TASKSTATS_GENL_VERSION = 0x1
TASKSTATS_TYPE_MAX = 0x6
- TASKSTATS_VERSION = 0xf
+ TASKSTATS_VERSION = 0x10
TCIFLUSH = 0x0
TCIOFF = 0x2
TCIOFLUSH = 0x2
@@ -3406,8 +3782,6 @@ const (
TCP_TX_DELAY = 0x25
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
- TCP_V4_FLOW = 0x1
- TCP_V6_FLOW = 0x5
TCP_WINDOW_CLAMP = 0xa
TCP_ZEROCOPY_RECEIVE = 0x23
TFD_TIMER_ABSTIME = 0x1
@@ -3530,14 +3904,14 @@ const (
UDP_NO_CHECK6_RX = 0x66
UDP_NO_CHECK6_TX = 0x65
UDP_SEGMENT = 0x67
- UDP_V4_FLOW = 0x2
- UDP_V6_FLOW = 0x6
UMOUNT_NOFOLLOW = 0x8
USBDEVICE_SUPER_MAGIC = 0x9fa2
UTIME_NOW = 0x3fffffff
UTIME_OMIT = 0x3ffffffe
V9FS_MAGIC = 0x1021997
VERASE = 0x2
+ VER_FLG_BASE = 0x1
+ VER_FLG_WEAK = 0x2
VINTR = 0x0
VKILL = 0x3
VLNEXT = 0xf
@@ -3574,7 +3948,7 @@ const (
WDIOS_TEMPPANIC = 0x4
WDIOS_UNKNOWN = -0x1
WEXITED = 0x4
- WGALLOWEDIP_A_MAX = 0x3
+ WGALLOWEDIP_A_MAX = 0x4
WGDEVICE_A_MAX = 0x8
WGPEER_A_MAX = 0xa
WG_CMD_MAX = 0x1
@@ -3688,6 +4062,7 @@ const (
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TXMD_FLAGS_CHECKSUM = 0x2
+ XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4
XDP_TXMD_FLAGS_TIMESTAMP = 0x1
XDP_TX_METADATA = 0x2
XDP_TX_RING = 0x3
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index a8c421e29b..1c37f9fbc4 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -360,6 +361,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 9a88d18130..6f54d34aef 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -361,6 +362,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 7cb6a867ef..783ec5c126 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -366,6 +367,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index d0ecd2c583..ca83d3ba16 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 7a2940ae0a..607e611c0c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -353,6 +354,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index d14ca8f2ec..b9cb5bd3c0 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 2da1bac1e3..65b078a638 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index 28727514b5..5298a3033d 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 7f287b54b5..7bc557c876 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 7e5f9e6aa8..152399bb04 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x300
CSIZE = 0x300
CSTOPB = 0x400
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40
@@ -414,6 +415,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 37c87952fc..1a1ce2409c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x300
CSIZE = 0x300
CSTOPB = 0x400
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40
@@ -418,6 +419,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 5220133613..4231a1fb57 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x300
CSIZE = 0x300
CSTOPB = 0x400
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40
@@ -418,6 +419,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 4bfe2b5b6e..21c0e95266 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -350,6 +351,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index e3cffb869a..f00d1cd7cf 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -422,6 +423,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index c219c8db39..bc8d539e6a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -71,6 +71,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -461,6 +462,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x2
SO_PASSPIDFD = 0x55
+ SO_PASSRIGHTS = 0x5c
SO_PASSSEC = 0x1f
SO_PEEK_OFF = 0x26
SO_PEERCRED = 0x40
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index 5cc1e8eb2f..8935d10a31 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -2238,3 +2238,13 @@ func Mseal(b []byte, flags uint) (err error) {
}
return
}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setMemPolicy(mode int, mask *CPUSet, size int) (err error) {
+ _, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index c6545413c4..b4609c20c2 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -72,7 +72,7 @@ import (
//go:cgo_import_dynamic libc_kill kill "libc.so"
//go:cgo_import_dynamic libc_lchown lchown "libc.so"
//go:cgo_import_dynamic libc_link link "libc.so"
-//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so"
+//go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so"
//go:cgo_import_dynamic libc_lstat lstat "libc.so"
//go:cgo_import_dynamic libc_madvise madvise "libc.so"
//go:cgo_import_dynamic libc_mkdir mkdir "libc.so"
@@ -221,7 +221,7 @@ import (
//go:linkname procKill libc_kill
//go:linkname procLchown libc_lchown
//go:linkname procLink libc_link
-//go:linkname proc__xnet_llisten libc___xnet_llisten
+//go:linkname proc__xnet_listen libc___xnet_listen
//go:linkname procLstat libc_lstat
//go:linkname procMadvise libc_madvise
//go:linkname procMkdir libc_mkdir
@@ -371,7 +371,7 @@ var (
procKill,
procLchown,
procLink,
- proc__xnet_llisten,
+ proc__xnet_listen,
procLstat,
procMadvise,
procMkdir,
@@ -1178,7 +1178,7 @@ func Link(path string, link string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listen(s int, backlog int) (err error) {
- _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0)
+ _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index c79aaff306..aca56ee494 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -462,4 +462,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index 5eb450695e..2ea1ef58c3 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -385,4 +385,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 05e5029744..d22c8af319 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -426,4 +426,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 38c53ec51b..5ee264ae97 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -329,4 +329,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index 31d2e71a18..f9f03ebf5f 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -325,4 +325,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index f4184a336b..87c2118e84 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -446,4 +446,5 @@ const (
SYS_GETXATTRAT = 4464
SYS_LISTXATTRAT = 4465
SYS_REMOVEXATTRAT = 4466
+ SYS_OPEN_TREE_ATTR = 4467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 05b9962278..391ad102fb 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -376,4 +376,5 @@ const (
SYS_GETXATTRAT = 5464
SYS_LISTXATTRAT = 5465
SYS_REMOVEXATTRAT = 5466
+ SYS_OPEN_TREE_ATTR = 5467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index 43a256e9e6..5656157757 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -376,4 +376,5 @@ const (
SYS_GETXATTRAT = 5464
SYS_LISTXATTRAT = 5465
SYS_REMOVEXATTRAT = 5466
+ SYS_OPEN_TREE_ATTR = 5467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index eea5ddfc22..0482b52e3c 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -446,4 +446,5 @@ const (
SYS_GETXATTRAT = 4464
SYS_LISTXATTRAT = 4465
SYS_REMOVEXATTRAT = 4466
+ SYS_OPEN_TREE_ATTR = 4467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
index 0d777bfbb1..71806f08f3 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
@@ -453,4 +453,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index b446365025..e35a710582 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -425,4 +425,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index 0c7d21c188..2aea476705 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -425,4 +425,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index 8405391698..6c9bb4e560 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -330,4 +330,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index fcf1b790d6..680bc9915a 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -391,4 +391,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 52d15b5f9d..620f271052 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -404,4 +404,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 8bcac2835f..c1a4670171 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -115,7 +115,9 @@ type Statx_t struct {
Atomic_write_unit_max uint32
Atomic_write_segments_max uint32
Dio_read_offset_align uint32
- _ [9]uint64
+ Atomic_write_unit_max_opt uint32
+ _ [1]uint32
+ _ [8]uint64
}
type Fsid struct {
@@ -199,7 +201,8 @@ type FscryptAddKeyArg struct {
Key_spec FscryptKeySpecifier
Raw_size uint32
Key_id uint32
- _ [8]uint32
+ Flags uint32
+ _ [7]uint32
}
type FscryptRemoveKeyArg struct {
@@ -629,6 +632,8 @@ const (
IFA_FLAGS = 0x8
IFA_RT_PRIORITY = 0x9
IFA_TARGET_NETNSID = 0xa
+ IFAL_LABEL = 0x2
+ IFAL_ADDRESS = 0x1
RT_SCOPE_UNIVERSE = 0x0
RT_SCOPE_SITE = 0xc8
RT_SCOPE_LINK = 0xfd
@@ -686,6 +691,7 @@ const (
SizeofRtAttr = 0x4
SizeofIfInfomsg = 0x10
SizeofIfAddrmsg = 0x8
+ SizeofIfAddrlblmsg = 0xc
SizeofIfaCacheinfo = 0x10
SizeofRtMsg = 0xc
SizeofRtNexthop = 0x8
@@ -737,6 +743,15 @@ type IfAddrmsg struct {
Index uint32
}
+type IfAddrlblmsg struct {
+ Family uint8
+ _ uint8
+ Prefixlen uint8
+ Flags uint8
+ Index uint32
+ Seq uint32
+}
+
type IfaCacheinfo struct {
Prefered uint32
Valid uint32
@@ -2317,6 +2332,11 @@ const (
NFT_CT_AVGPKT = 0x10
NFT_CT_ZONE = 0x11
NFT_CT_EVENTMASK = 0x12
+ NFT_CT_SRC_IP = 0x13
+ NFT_CT_DST_IP = 0x14
+ NFT_CT_SRC_IP6 = 0x15
+ NFT_CT_DST_IP6 = 0x16
+ NFT_CT_ID = 0x17
NFTA_CT_UNSPEC = 0x0
NFTA_CT_DREG = 0x1
NFTA_CT_KEY = 0x2
@@ -2597,8 +2617,8 @@ const (
SOF_TIMESTAMPING_BIND_PHC = 0x8000
SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000
- SOF_TIMESTAMPING_LAST = 0x20000
- SOF_TIMESTAMPING_MASK = 0x3ffff
+ SOF_TIMESTAMPING_LAST = 0x40000
+ SOF_TIMESTAMPING_MASK = 0x7ffff
SCM_TSTAMP_SND = 0x0
SCM_TSTAMP_SCHED = 0x1
@@ -3044,6 +3064,23 @@ const (
)
const (
+ TCA_UNSPEC = 0x0
+ TCA_KIND = 0x1
+ TCA_OPTIONS = 0x2
+ TCA_STATS = 0x3
+ TCA_XSTATS = 0x4
+ TCA_RATE = 0x5
+ TCA_FCNT = 0x6
+ TCA_STATS2 = 0x7
+ TCA_STAB = 0x8
+ TCA_PAD = 0x9
+ TCA_DUMP_INVISIBLE = 0xa
+ TCA_CHAIN = 0xb
+ TCA_HW_OFFLOAD = 0xc
+ TCA_INGRESS_BLOCK = 0xd
+ TCA_EGRESS_BLOCK = 0xe
+ TCA_DUMP_FLAGS = 0xf
+ TCA_EXT_WARN_MSG = 0x10
RTNLGRP_NONE = 0x0
RTNLGRP_LINK = 0x1
RTNLGRP_NOTIFY = 0x2
@@ -3078,6 +3115,18 @@ const (
RTNLGRP_IPV6_MROUTE_R = 0x1f
RTNLGRP_NEXTHOP = 0x20
RTNLGRP_BRVLAN = 0x21
+ RTNLGRP_MCTP_IFADDR = 0x22
+ RTNLGRP_TUNNEL = 0x23
+ RTNLGRP_STATS = 0x24
+ RTNLGRP_IPV4_MCADDR = 0x25
+ RTNLGRP_IPV6_MCADDR = 0x26
+ RTNLGRP_IPV6_ACADDR = 0x27
+ TCA_ROOT_UNSPEC = 0x0
+ TCA_ROOT_TAB = 0x1
+ TCA_ROOT_FLAGS = 0x2
+ TCA_ROOT_COUNT = 0x3
+ TCA_ROOT_TIME_DELTA = 0x4
+ TCA_ROOT_EXT_WARN_MSG = 0x5
)
type CapUserHeader struct {
@@ -3541,6 +3590,8 @@ type Nhmsg struct {
Flags uint32
}
+const SizeofNhmsg = 0x8
+
type NexthopGrp struct {
Id uint32
Weight uint8
@@ -3548,6 +3599,8 @@ type NexthopGrp struct {
Resvd2 uint16
}
+const SizeofNexthopGrp = 0x8
+
const (
NHA_UNSPEC = 0x0
NHA_ID = 0x1
@@ -4044,7 +4097,7 @@ const (
ETHTOOL_A_TSINFO_PHC_INDEX = 0x5
ETHTOOL_A_TSINFO_STATS = 0x6
ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7
- ETHTOOL_A_TSINFO_MAX = 0x7
+ ETHTOOL_A_TSINFO_MAX = 0x9
ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0
ETHTOOL_A_CABLE_TEST_HEADER = 0x1
ETHTOOL_A_CABLE_TEST_MAX = 0x1
@@ -4130,6 +4183,19 @@ const (
ETHTOOL_A_TUNNEL_INFO_MAX = 0x2
)
+const (
+ TCP_V4_FLOW = 0x1
+ UDP_V4_FLOW = 0x2
+ TCP_V6_FLOW = 0x5
+ UDP_V6_FLOW = 0x6
+ ESP_V4_FLOW = 0xa
+ ESP_V6_FLOW = 0xc
+ IP_USER_FLOW = 0xd
+ IPV6_USER_FLOW = 0xe
+ IPV6_FLOW = 0x11
+ ETHER_FLOW = 0x12
+)
+
const SPEED_UNKNOWN = -0x1
type EthtoolDrvinfo struct {
@@ -4780,7 +4846,7 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
- NL80211_ATTR_MAX = 0x150
+ NL80211_ATTR_MAX = 0x151
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143
@@ -5414,7 +5480,7 @@ const (
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
- NL80211_FREQUENCY_ATTR_MAX = 0x21
+ NL80211_FREQUENCY_ATTR_MAX = 0x22
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc
@@ -5530,7 +5596,7 @@ const (
NL80211_MAX_SUPP_SELECTORS = 0x80
NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5
NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3
- NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5
+ NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6
NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2
NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1
NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4
@@ -6270,3 +6336,30 @@ type SockDiagReq struct {
}
const RTM_NEWNVLAN = 0x70
+
+const (
+ MPOL_BIND = 0x2
+ MPOL_DEFAULT = 0x0
+ MPOL_F_ADDR = 0x2
+ MPOL_F_MEMS_ALLOWED = 0x4
+ MPOL_F_MOF = 0x8
+ MPOL_F_MORON = 0x10
+ MPOL_F_NODE = 0x1
+ MPOL_F_NUMA_BALANCING = 0x2000
+ MPOL_F_RELATIVE_NODES = 0x4000
+ MPOL_F_SHARED = 0x1
+ MPOL_F_STATIC_NODES = 0x8000
+ MPOL_INTERLEAVE = 0x3
+ MPOL_LOCAL = 0x4
+ MPOL_MAX = 0x7
+ MPOL_MF_INTERNAL = 0x10
+ MPOL_MF_LAZY = 0x8
+ MPOL_MF_MOVE_ALL = 0x4
+ MPOL_MF_MOVE = 0x2
+ MPOL_MF_STRICT = 0x1
+ MPOL_MF_VALID = 0x7
+ MPOL_MODE_FLAGS = 0xe000
+ MPOL_PREFERRED = 0x1
+ MPOL_PREFERRED_MANY = 0x5
+ MPOL_WEIGHTED_INTERLEAVE = 0x6
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index 62db85f6cb..485f2d3a1b 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -282,19 +282,13 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]int8
@@ -330,17 +324,11 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
_ [4]byte
Ac_tgetime uint64
@@ -348,10 +336,22 @@ type Taskstats struct {
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index 7d89d648d9..ecbd1ad8bc 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -300,16 +300,10 @@ type Taskstats struct {
Ac_nice uint8
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]int8
@@ -344,27 +338,33 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
Ac_tgetime uint64
Ac_exe_dev uint64
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 9c0b39eec7..02f0463a44 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -91,7 +91,7 @@ type Stat_t struct {
Gid uint32
Rdev uint64
_ uint16
- _ [4]byte
+ _ [6]byte
Size int64
Blksize int32
_ [4]byte
@@ -273,19 +273,13 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]uint8
@@ -321,17 +315,11 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
_ [4]byte
Ac_tgetime uint64
@@ -339,10 +327,22 @@ type Taskstats struct {
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index de9c7ff36c..6f4d400d24 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -279,16 +279,10 @@ type Taskstats struct {
Ac_nice uint8
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]int8
@@ -323,27 +317,33 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
Ac_tgetime uint64
Ac_exe_dev uint64
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
index 2336bd2bf0..cd532cfa55 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
@@ -280,16 +280,10 @@ type Taskstats struct {
Ac_nice uint8
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]int8
@@ -324,27 +318,33 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
Ac_tgetime uint64
Ac_exe_dev uint64
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 4711f0be16..4133620851 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -278,19 +278,13 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]int8
@@ -326,17 +320,11 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
_ [4]byte
Ac_tgetime uint64
@@ -344,10 +332,22 @@ type Taskstats struct {
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index ab99a34b99..eaa37eb718 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -282,16 +282,10 @@ type Taskstats struct {
Ac_nice uint8
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]int8
@@ -326,27 +320,33 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
Ac_tgetime uint64
Ac_exe_dev uint64
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index 04c9866e3c..98ae6a1e4a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -282,16 +282,10 @@ type Taskstats struct {
Ac_nice uint8
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]int8
@@ -326,27 +320,33 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
Ac_tgetime uint64
Ac_exe_dev uint64
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index 60aa69f618..cae1961594 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -278,19 +278,13 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]int8
@@ -326,17 +320,11 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
_ [4]byte
Ac_tgetime uint64
@@ -344,10 +332,22 @@ type Taskstats struct {
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
index cb4fad785d..6ce3b4e028 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
@@ -90,7 +90,7 @@ type Stat_t struct {
Gid uint32
Rdev uint64
_ uint16
- _ [4]byte
+ _ [6]byte
Size int64
Blksize int32
_ [4]byte
@@ -285,19 +285,13 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]uint8
@@ -333,17 +327,11 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
_ [4]byte
Ac_tgetime uint64
@@ -351,10 +339,22 @@ type Taskstats struct {
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 60272cfce8..c7429c6a14 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -289,16 +289,10 @@ type Taskstats struct {
Ac_nice uint8
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]uint8
@@ -333,27 +327,33 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
Ac_tgetime uint64
Ac_exe_dev uint64
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 3f5b91bc0d..4bf4baf4ca 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -289,16 +289,10 @@ type Taskstats struct {
Ac_nice uint8
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]uint8
@@ -333,27 +327,33 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
Ac_tgetime uint64
Ac_exe_dev uint64
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index 51550f15a6..e9709d70af 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -307,16 +307,10 @@ type Taskstats struct {
Ac_nice uint8
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]uint8
@@ -351,27 +345,33 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
Ac_tgetime uint64
Ac_exe_dev uint64
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index 3239e50e0e..fb44268ca7 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -302,16 +302,10 @@ type Taskstats struct {
Ac_nice uint8
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]int8
@@ -346,27 +340,33 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
Ac_tgetime uint64
Ac_exe_dev uint64
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index faf2002783..9c38265c74 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -284,16 +284,10 @@ type Taskstats struct {
Ac_nice uint8
Cpu_count uint64
Cpu_delay_total uint64
- Cpu_delay_max uint64
- Cpu_delay_min uint64
Blkio_count uint64
Blkio_delay_total uint64
- Blkio_delay_max uint64
- Blkio_delay_min uint64
Swapin_count uint64
Swapin_delay_total uint64
- Swapin_delay_max uint64
- Swapin_delay_min uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]int8
@@ -328,27 +322,33 @@ type Taskstats struct {
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
- Freepages_delay_max uint64
- Freepages_delay_min uint64
Thrashing_count uint64
Thrashing_delay_total uint64
- Thrashing_delay_max uint64
- Thrashing_delay_min uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
- Compact_delay_max uint64
- Compact_delay_min uint64
Ac_tgid uint32
Ac_tgetime uint64
Ac_exe_dev uint64
Ac_exe_inode uint64
Wpcopy_count uint64
Wpcopy_delay_total uint64
- Wpcopy_delay_max uint64
- Wpcopy_delay_min uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
Irq_delay_max uint64
Irq_delay_min uint64
}
diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
index fc1835d8a2..bc1ce4360b 100644
--- a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
@@ -52,7 +52,7 @@ var (
)
func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) {
- r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result)))
+ r0, _, _ := syscall.SyscallN(procRegConnectRegistryW.Addr(), uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -60,7 +60,7 @@ func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall
}
func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) {
- r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
+ r0, _, _ := syscall.SyscallN(procRegCreateKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -68,7 +68,7 @@ func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *
}
func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
- r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0)
+ r0, _, _ := syscall.SyscallN(procRegDeleteKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -76,7 +76,7 @@ func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
}
func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) {
- r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0)
+ r0, _, _ := syscall.SyscallN(procRegDeleteValueW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -84,7 +84,7 @@ func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) {
}
func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
- r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0)
+ r0, _, _ := syscall.SyscallN(procRegEnumValueW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -92,7 +92,7 @@ func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint3
}
func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) {
- r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRegLoadMUIStringW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -100,7 +100,7 @@ func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint
}
func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) {
- r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
+ r0, _, _ := syscall.SyscallN(procRegSetValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -108,7 +108,7 @@ func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype
}
func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+ r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 640f6b153f..69439df2a4 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -321,6 +321,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP
//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
+//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents
+//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer
//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
//sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW
@@ -890,8 +892,12 @@ const socket_error = uintptr(^uint32(0))
//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
//sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx
//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex
+//sys GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) = iphlpapi.GetIpForwardEntry2
+//sys GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) = iphlpapi.GetIpForwardTable2
//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry
+//sys FreeMibTable(memory unsafe.Pointer) = iphlpapi.FreeMibTable
//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange
+//sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2
//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange
//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2
@@ -914,6 +920,17 @@ type RawSockaddrInet6 struct {
Scope_id uint32
}
+// RawSockaddrInet is a union that contains an IPv4, an IPv6 address, or an address family. See
+// https://learn.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-sockaddr_inet.
+//
+// A [*RawSockaddrInet] may be converted to a [*RawSockaddrInet4] or [*RawSockaddrInet6] using
+// unsafe, depending on the address family.
+type RawSockaddrInet struct {
+ Family uint16
+ Port uint16
+ Data [6]uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 958bcf47a3..6e4f50eb48 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -65,6 +65,22 @@ var signals = [...]string{
15: "terminated",
}
+// File flags for [os.OpenFile]. The O_ prefix is used to indicate
+// that these flags are specific to the OpenFile function.
+const (
+ O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL
+ O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT
+ O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE
+ O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS
+ O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS
+ O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE
+ O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN
+ O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS
+ O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING
+ O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED
+ O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH
+)
+
const (
FILE_READ_DATA = 0x00000001
FILE_READ_ATTRIBUTES = 0x00000080
@@ -1976,6 +1992,12 @@ const (
SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1
)
+// FILE_ZERO_DATA_INFORMATION from winioctl.h
+type FileZeroDataInformation struct {
+ FileOffset int64
+ BeyondFinalZero int64
+}
+
const (
ComputerNameNetBIOS = 0
ComputerNameDnsHostname = 1
@@ -2298,6 +2320,82 @@ type MibIfRow2 struct {
OutQLen uint64
}
+// IP_ADDRESS_PREFIX stores an IP address prefix. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-ip_address_prefix.
+type IpAddressPrefix struct {
+ Prefix RawSockaddrInet
+ PrefixLength uint8
+}
+
+// NL_ROUTE_ORIGIN enumeration from nldef.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_origin.
+const (
+ NlroManual = 0
+ NlroWellKnown = 1
+ NlroDHCP = 2
+ NlroRouterAdvertisement = 3
+ Nlro6to4 = 4
+)
+
+// NL_ROUTE_ORIGIN enumeration from nldef.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_protocol.
+const (
+ MIB_IPPROTO_OTHER = 1
+ MIB_IPPROTO_LOCAL = 2
+ MIB_IPPROTO_NETMGMT = 3
+ MIB_IPPROTO_ICMP = 4
+ MIB_IPPROTO_EGP = 5
+ MIB_IPPROTO_GGP = 6
+ MIB_IPPROTO_HELLO = 7
+ MIB_IPPROTO_RIP = 8
+ MIB_IPPROTO_IS_IS = 9
+ MIB_IPPROTO_ES_IS = 10
+ MIB_IPPROTO_CISCO = 11
+ MIB_IPPROTO_BBN = 12
+ MIB_IPPROTO_OSPF = 13
+ MIB_IPPROTO_BGP = 14
+ MIB_IPPROTO_IDPR = 15
+ MIB_IPPROTO_EIGRP = 16
+ MIB_IPPROTO_DVMRP = 17
+ MIB_IPPROTO_RPL = 18
+ MIB_IPPROTO_DHCP = 19
+ MIB_IPPROTO_NT_AUTOSTATIC = 10002
+ MIB_IPPROTO_NT_STATIC = 10006
+ MIB_IPPROTO_NT_STATIC_NON_DOD = 10007
+)
+
+// MIB_IPFORWARD_ROW2 stores information about an IP route entry. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2.
+type MibIpForwardRow2 struct {
+ InterfaceLuid uint64
+ InterfaceIndex uint32
+ DestinationPrefix IpAddressPrefix
+ NextHop RawSockaddrInet
+ SitePrefixLength uint8
+ ValidLifetime uint32
+ PreferredLifetime uint32
+ Metric uint32
+ Protocol uint32
+ Loopback uint8
+ AutoconfigureAddress uint8
+ Publish uint8
+ Immortal uint8
+ Age uint32
+ Origin uint32
+}
+
+// MIB_IPFORWARD_TABLE2 contains a table of IP route entries. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_table2.
+type MibIpForwardTable2 struct {
+ NumEntries uint32
+ Table [1]MibIpForwardRow2
+}
+
+// Rows returns the IP route entries in the table.
+func (t *MibIpForwardTable2) Rows() []MibIpForwardRow2 {
+ return unsafe.Slice(&t.Table[0], t.NumEntries)
+}
+
// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See
// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row.
type MibUnicastIpAddressRow struct {
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index a58bc48b8e..f25b7308a1 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -182,13 +182,17 @@ var (
procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute")
procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute")
procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2")
+ procFreeMibTable = modiphlpapi.NewProc("FreeMibTable")
procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses")
procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo")
procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx")
procGetIfEntry = modiphlpapi.NewProc("GetIfEntry")
procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex")
+ procGetIpForwardEntry2 = modiphlpapi.NewProc("GetIpForwardEntry2")
+ procGetIpForwardTable2 = modiphlpapi.NewProc("GetIpForwardTable2")
procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry")
procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange")
+ procNotifyRouteChange2 = modiphlpapi.NewProc("NotifyRouteChange2")
procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange")
procAddDllDirectory = modkernel32.NewProc("AddDllDirectory")
procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject")
@@ -238,6 +242,7 @@ var (
procFindResourceW = modkernel32.NewProc("FindResourceW")
procFindVolumeClose = modkernel32.NewProc("FindVolumeClose")
procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose")
+ procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer")
procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers")
procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile")
procFormatMessageW = modkernel32.NewProc("FormatMessageW")
@@ -284,6 +289,7 @@ var (
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId")
+ procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents")
procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult")
procGetPriorityClass = modkernel32.NewProc("GetPriorityClass")
procGetProcAddress = modkernel32.NewProc("GetProcAddress")
@@ -546,25 +552,25 @@ var (
)
func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) {
- r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_Status.Addr(), uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags))
ret = CONFIGRET(r0)
return
}
func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) {
- r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0)
+ r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags))
ret = CONFIGRET(r0)
return
}
func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) {
- r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags))
ret = CONFIGRET(r0)
return
}
func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) {
- r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0)
+ r0, _, _ := syscall.SyscallN(procCM_MapCrToWin32Err.Addr(), uintptr(configRet), uintptr(defaultWin32Error))
ret = Errno(r0)
return
}
@@ -574,7 +580,7 @@ func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups,
if resetToDefault {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
+ r1, _, e1 := syscall.SyscallN(procAdjustTokenGroups.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -586,7 +592,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok
if disableAllPrivileges {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
+ r1, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -594,7 +600,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok
}
func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) {
- r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0)
+ r1, _, e1 := syscall.SyscallN(procAllocateAndInitializeSid.Addr(), uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -602,7 +608,7 @@ func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, s
}
func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) {
- r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor)))
+ r0, _, _ := syscall.SyscallN(procBuildSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -610,7 +616,7 @@ func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries
}
func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) {
- r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info)))
+ r1, _, e1 := syscall.SyscallN(procChangeServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -618,7 +624,7 @@ func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err err
}
func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0)
+ r1, _, e1 := syscall.SyscallN(procChangeServiceConfigW.Addr(), uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -626,7 +632,7 @@ func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, e
}
func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) {
- r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember)))
+ r1, _, e1 := syscall.SyscallN(procCheckTokenMembership.Addr(), uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -634,7 +640,7 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (
}
func CloseServiceHandle(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCloseServiceHandle.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -642,7 +648,7 @@ func CloseServiceHandle(handle Handle) (err error) {
}
func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) {
- r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status)))
+ r1, _, e1 := syscall.SyscallN(procControlService.Addr(), uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -650,7 +656,7 @@ func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err
}
func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0)
+ r1, _, e1 := syscall.SyscallN(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -658,7 +664,7 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR
}
func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0)
+ r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -675,7 +681,7 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui
}
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -683,7 +689,7 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision
}
func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) {
- r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0)
+ r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -691,7 +697,7 @@ func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) {
}
func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) {
- r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid)))
+ r1, _, e1 := syscall.SyscallN(procCopySid.Addr(), uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -703,7 +709,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc
if inheritHandles {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0)
+ r1, _, e1 := syscall.SyscallN(procCreateProcessAsUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -711,7 +717,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc
}
func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -720,7 +726,7 @@ func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access
}
func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCreateWellKnownSid.Addr(), uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -728,7 +734,7 @@ func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, s
}
func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procCryptAcquireContextW.Addr(), uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -736,7 +742,7 @@ func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16
}
func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) {
- r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
+ r1, _, e1 := syscall.SyscallN(procCryptGenRandom.Addr(), uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -744,7 +750,7 @@ func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) {
}
func CryptReleaseContext(provhandle Handle, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procCryptReleaseContext.Addr(), uintptr(provhandle), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -752,7 +758,7 @@ func CryptReleaseContext(provhandle Handle, flags uint32) (err error) {
}
func DeleteService(service Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeleteService.Addr(), uintptr(service))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -760,7 +766,7 @@ func DeleteService(service Handle) (err error) {
}
func DeregisterEventSource(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeregisterEventSource.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -768,7 +774,7 @@ func DeregisterEventSource(handle Handle) (err error) {
}
func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) {
- r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken)))
+ r1, _, e1 := syscall.SyscallN(procDuplicateTokenEx.Addr(), uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -776,7 +782,7 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes
}
func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)))
+ r1, _, e1 := syscall.SyscallN(procEnumDependentServicesW.Addr(), uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -784,7 +790,7 @@ func EnumDependentServices(service Handle, activityState uint32, services *ENUM_
}
func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procEnumServicesStatusExW.Addr(), uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -792,13 +798,13 @@ func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serv
}
func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) {
- r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0)
+ r0, _, _ := syscall.SyscallN(procEqualSid.Addr(), uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)))
isEqual = r0 != 0
return
}
func FreeSid(sid *SID) (err error) {
- r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFreeSid.Addr(), uintptr(unsafe.Pointer(sid)))
if r1 != 0 {
err = errnoErr(e1)
}
@@ -806,7 +812,7 @@ func FreeSid(sid *SID) (err error) {
}
func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) {
- r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce)))
+ r1, _, e1 := syscall.SyscallN(procGetAce.Addr(), uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -814,7 +820,7 @@ func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) {
}
func GetLengthSid(sid *SID) (len uint32) {
- r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetLengthSid.Addr(), uintptr(unsafe.Pointer(sid)))
len = uint32(r0)
return
}
@@ -829,7 +835,7 @@ func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security
}
func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) {
- r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0)
+ r0, _, _ := syscall.SyscallN(procGetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -837,7 +843,7 @@ func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi
}
func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision)))
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -853,7 +859,7 @@ func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl
if *daclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)))
*daclPresent = _p0 != 0
*daclDefaulted = _p1 != 0
if r1 == 0 {
@@ -867,7 +873,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau
if *groupDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0)))
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0)))
*groupDefaulted = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -876,7 +882,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau
}
func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) {
- r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorLength.Addr(), uintptr(unsafe.Pointer(sd)))
len = uint32(r0)
return
}
@@ -886,7 +892,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau
if *ownerDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0)))
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0)))
*ownerDefaulted = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -895,7 +901,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau
}
func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) {
- r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0)
+ r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -911,7 +917,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl
if *saclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)))
*saclPresent = _p0 != 0
*saclDefaulted = _p1 != 0
if r1 == 0 {
@@ -921,7 +927,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl
}
func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) {
- r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0)
+ r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -929,25 +935,25 @@ func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati
}
func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) {
- r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetSidIdentifierAuthority.Addr(), uintptr(unsafe.Pointer(sid)))
authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0))
return
}
func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) {
- r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0)
+ r0, _, _ := syscall.SyscallN(procGetSidSubAuthority.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(index))
subAuthority = (*uint32)(unsafe.Pointer(r0))
return
}
func getSidSubAuthorityCount(sid *SID) (count *uint8) {
- r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetSidSubAuthorityCount.Addr(), uintptr(unsafe.Pointer(sid)))
count = (*uint8)(unsafe.Pointer(r0))
return
}
func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -955,7 +961,7 @@ func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint
}
func ImpersonateSelf(impersonationlevel uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(impersonationlevel))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -963,7 +969,7 @@ func ImpersonateSelf(impersonationlevel uint32) (err error) {
}
func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0)
+ r1, _, e1 := syscall.SyscallN(procInitializeSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -979,7 +985,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint
if rebootAfterShutdown {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason))
+ r1, _, e1 := syscall.SyscallN(procInitiateSystemShutdownExW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -987,7 +993,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint
}
func isTokenRestricted(tokenHandle Token) (ret bool, err error) {
- r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procIsTokenRestricted.Addr(), uintptr(tokenHandle))
ret = r0 != 0
if !ret {
err = errnoErr(e1)
@@ -996,25 +1002,25 @@ func isTokenRestricted(tokenHandle Token) (ret bool, err error) {
}
func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) {
- r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsValidSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(sd)))
isValid = r0 != 0
return
}
func isValidSid(sid *SID) (isValid bool) {
- r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsValidSid.Addr(), uintptr(unsafe.Pointer(sid)))
isValid = r0 != 0
return
}
func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) {
- r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0)
+ r0, _, _ := syscall.SyscallN(procIsWellKnownSid.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(sidType))
isWellKnown = r0 != 0
return
}
func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1022,7 +1028,7 @@ func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen
}
func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1030,7 +1036,7 @@ func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint3
}
func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) {
- r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+ r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1038,7 +1044,7 @@ func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err err
}
func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procMakeAbsoluteSD.Addr(), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1046,7 +1052,7 @@ func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DE
}
func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize)))
+ r1, _, e1 := syscall.SyscallN(procMakeSelfRelativeSD.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1054,7 +1060,7 @@ func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURIT
}
func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) {
- r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier)))
+ r0, _, _ := syscall.SyscallN(procNotifyServiceStatusChangeW.Addr(), uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1062,7 +1068,7 @@ func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERV
}
func OpenProcessToken(process Handle, access uint32, token *Token) (err error) {
- r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token)))
+ r1, _, e1 := syscall.SyscallN(procOpenProcessToken.Addr(), uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1070,7 +1076,7 @@ func OpenProcessToken(process Handle, access uint32, token *Token) (err error) {
}
func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access))
+ r0, _, e1 := syscall.SyscallN(procOpenSCManagerW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1079,7 +1085,7 @@ func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (ha
}
func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access))
+ r0, _, e1 := syscall.SyscallN(procOpenServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1092,7 +1098,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token
if openAsSelf {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1100,7 +1106,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token
}
func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1108,7 +1114,7 @@ func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize
}
func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceConfigW.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1120,7 +1126,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf
if err != nil {
return
}
- r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo))
+ r1, _, e1 := syscall.SyscallN(procQueryServiceDynamicInformation.Addr(), uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1128,7 +1134,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf
}
func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceLockStatusW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1136,7 +1142,7 @@ func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, b
}
func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) {
- r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(status)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1144,7 +1150,7 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) {
}
func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceStatusEx.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1152,7 +1158,7 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize
}
func RegCloseKey(key Handle) (regerrno error) {
- r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRegCloseKey.Addr(), uintptr(key))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1160,7 +1166,7 @@ func RegCloseKey(key Handle) (regerrno error) {
}
func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) {
- r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0)
+ r0, _, _ := syscall.SyscallN(procRegEnumKeyExW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1176,7 +1182,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32,
if asynchronous {
_p1 = 1
}
- r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0)
+ r0, _, _ := syscall.SyscallN(procRegNotifyChangeKeyValue.Addr(), uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1184,7 +1190,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32,
}
func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) {
- r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0)
+ r0, _, _ := syscall.SyscallN(procRegOpenKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1192,7 +1198,7 @@ func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint
}
func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) {
- r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime)))
+ r0, _, _ := syscall.SyscallN(procRegQueryInfoKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1200,7 +1206,7 @@ func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint
}
func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
- r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)))
+ r0, _, _ := syscall.SyscallN(procRegQueryValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1208,7 +1214,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32
}
func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0)
+ r0, _, e1 := syscall.SyscallN(procRegisterEventSourceW.Addr(), uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1217,7 +1223,7 @@ func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Hand
}
func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context))
+ r0, _, e1 := syscall.SyscallN(procRegisterServiceCtrlHandlerExW.Addr(), uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1226,7 +1232,7 @@ func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, cont
}
func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData)))
+ r1, _, e1 := syscall.SyscallN(procReportEventW.Addr(), uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1234,7 +1240,7 @@ func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrS
}
func RevertToSelf() (err error) {
- r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
+ r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr())
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1242,7 +1248,7 @@ func RevertToSelf() (err error) {
}
func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) {
- r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1250,7 +1256,7 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE
}
func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) {
- r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor)))
+ r1, _, e1 := syscall.SyscallN(procSetKernelObjectSecurity.Addr(), uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1267,7 +1273,7 @@ func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security
}
func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
- r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1275,7 +1281,7 @@ func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi
}
func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) {
- r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet))
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1291,7 +1297,7 @@ func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *
if daclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1303,7 +1309,7 @@ func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaul
if groupDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0))
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1315,7 +1321,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul
if ownerDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0))
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1323,7 +1329,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul
}
func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) {
- syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0)
+ syscall.SyscallN(procSetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)))
return
}
@@ -1336,7 +1342,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *
if saclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1344,7 +1350,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *
}
func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
- r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1352,7 +1358,7 @@ func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati
}
func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) {
- r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceStatus)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1360,7 +1366,7 @@ func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error)
}
func SetThreadToken(thread *Handle, token Token) (err error) {
- r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0)
+ r1, _, e1 := syscall.SyscallN(procSetThreadToken.Addr(), uintptr(unsafe.Pointer(thread)), uintptr(token))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1368,7 +1374,7 @@ func SetThreadToken(thread *Handle, token Token) (err error) {
}
func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1376,7 +1382,7 @@ func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint
}
func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) {
- r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procStartServiceCtrlDispatcherW.Addr(), uintptr(unsafe.Pointer(serviceTable)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1384,7 +1390,7 @@ func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) {
}
func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors)))
+ r1, _, e1 := syscall.SyscallN(procStartServiceW.Addr(), uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1392,7 +1398,7 @@ func StartService(service Handle, numArgs uint32, argVectors **uint16) (err erro
}
func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) {
- r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertAddCertificateContextToStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1400,7 +1406,7 @@ func CertAddCertificateContextToStore(store Handle, certContext *CertContext, ad
}
func CertCloseStore(store Handle, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procCertCloseStore.Addr(), uintptr(store), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1408,7 +1414,7 @@ func CertCloseStore(store Handle, flags uint32) (err error) {
}
func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) {
- r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen))
+ r0, _, e1 := syscall.SyscallN(procCertCreateCertificateContext.Addr(), uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen))
context = (*CertContext)(unsafe.Pointer(r0))
if context == nil {
err = errnoErr(e1)
@@ -1417,7 +1423,7 @@ func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, en
}
func CertDeleteCertificateFromStore(certContext *CertContext) (err error) {
- r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertDeleteCertificateFromStore.Addr(), uintptr(unsafe.Pointer(certContext)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1425,13 +1431,13 @@ func CertDeleteCertificateFromStore(certContext *CertContext) (err error) {
}
func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) {
- r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCertDuplicateCertificateContext.Addr(), uintptr(unsafe.Pointer(certContext)))
dupContext = (*CertContext)(unsafe.Pointer(r0))
return
}
func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) {
- r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0)
+ r0, _, e1 := syscall.SyscallN(procCertEnumCertificatesInStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(prevContext)))
context = (*CertContext)(unsafe.Pointer(r0))
if context == nil {
err = errnoErr(e1)
@@ -1440,7 +1446,7 @@ func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (contex
}
func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) {
- r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext)))
+ r0, _, e1 := syscall.SyscallN(procCertFindCertificateInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext)))
cert = (*CertContext)(unsafe.Pointer(r0))
if cert == nil {
err = errnoErr(e1)
@@ -1449,7 +1455,7 @@ func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags
}
func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) {
- r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext)))
+ r0, _, e1 := syscall.SyscallN(procCertFindChainInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext)))
certchain = (*CertChainContext)(unsafe.Pointer(r0))
if certchain == nil {
err = errnoErr(e1)
@@ -1458,18 +1464,18 @@ func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint3
}
func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) {
- r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions)))
+ r0, _, _ := syscall.SyscallN(procCertFindExtension.Addr(), uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions)))
ret = (*CertExtension)(unsafe.Pointer(r0))
return
}
func CertFreeCertificateChain(ctx *CertChainContext) {
- syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0)
+ syscall.SyscallN(procCertFreeCertificateChain.Addr(), uintptr(unsafe.Pointer(ctx)))
return
}
func CertFreeCertificateContext(ctx *CertContext) (err error) {
- r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertFreeCertificateContext.Addr(), uintptr(unsafe.Pointer(ctx)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1477,7 +1483,7 @@ func CertFreeCertificateContext(ctx *CertContext) (err error) {
}
func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) {
- r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0)
+ r1, _, e1 := syscall.SyscallN(procCertGetCertificateChain.Addr(), uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1485,13 +1491,13 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, a
}
func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) {
- r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size))
+ r0, _, _ := syscall.SyscallN(procCertGetNameStringW.Addr(), uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size))
chars = uint32(r0)
return
}
func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0)
+ r0, _, e1 := syscall.SyscallN(procCertOpenStore.Addr(), uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1500,7 +1506,7 @@ func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptPr
}
func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) {
- r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0)
+ r0, _, e1 := syscall.SyscallN(procCertOpenSystemStoreW.Addr(), uintptr(hprov), uintptr(unsafe.Pointer(name)))
store = Handle(r0)
if store == 0 {
err = errnoErr(e1)
@@ -1509,7 +1515,7 @@ func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) {
}
func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) {
- r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertVerifyCertificateChainPolicy.Addr(), uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1521,7 +1527,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete
if *callerFreeProvOrNCryptKey {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0)))
+ r1, _, e1 := syscall.SyscallN(procCryptAcquireCertificatePrivateKey.Addr(), uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0)))
*callerFreeProvOrNCryptKey = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -1530,7 +1536,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete
}
func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCryptDecodeObject.Addr(), uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1538,7 +1544,7 @@ func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte
}
func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) {
- r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCryptProtectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1546,7 +1552,7 @@ func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob,
}
func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) {
- r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0)
+ r1, _, e1 := syscall.SyscallN(procCryptQueryObject.Addr(), uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1554,7 +1560,7 @@ func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentT
}
func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) {
- r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCryptUnprotectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1562,7 +1568,7 @@ func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBl
}
func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) {
- r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags))
+ r0, _, e1 := syscall.SyscallN(procPFXImportCertStore.Addr(), uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags))
store = Handle(r0)
if store == 0 {
err = errnoErr(e1)
@@ -1571,7 +1577,7 @@ func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (sto
}
func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) {
- r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0)
+ r0, _, _ := syscall.SyscallN(procDnsNameCompare_W.Addr(), uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)))
same = r0 != 0
return
}
@@ -1586,7 +1592,7 @@ func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSR
}
func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) {
- r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr)))
+ r0, _, _ := syscall.SyscallN(procDnsQuery_W.Addr(), uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr)))
if r0 != 0 {
status = syscall.Errno(r0)
}
@@ -1594,12 +1600,12 @@ func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DN
}
func DnsRecordListFree(rl *DNSRecord, freetype uint32) {
- syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0)
+ syscall.SyscallN(procDnsRecordListFree.Addr(), uintptr(unsafe.Pointer(rl)), uintptr(freetype))
return
}
func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) {
- r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0)
+ r0, _, _ := syscall.SyscallN(procDwmGetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1607,7 +1613,7 @@ func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si
}
func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) {
- r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0)
+ r0, _, _ := syscall.SyscallN(procDwmSetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1615,15 +1621,20 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si
}
func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
- r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
return
}
+func FreeMibTable(memory unsafe.Pointer) {
+ syscall.SyscallN(procFreeMibTable.Addr(), uintptr(memory))
+ return
+}
+
func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
- r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
+ r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1631,7 +1642,7 @@ func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapter
}
func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0)
+ r0, _, _ := syscall.SyscallN(procGetAdaptersInfo.Addr(), uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1639,7 +1650,7 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) {
}
func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0)
+ r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1647,7 +1658,7 @@ func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcod
}
func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetIfEntry.Addr(), uintptr(unsafe.Pointer(pIfRow)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1655,7 +1666,23 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
}
func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0)
+ r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
+func GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) {
+ r0, _, _ := syscall.SyscallN(procGetIpForwardEntry2.Addr(), uintptr(unsafe.Pointer(row)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
+func GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) {
+ r0, _, _ := syscall.SyscallN(procGetIpForwardTable2.Addr(), uintptr(family), uintptr(unsafe.Pointer(table)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1663,7 +1690,7 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
}
func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1675,7 +1702,19 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa
if initialNotification {
_p0 = 1
}
- r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0)
+ r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
+func NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+ var _p0 uint32
+ if initialNotification {
+ _p0 = 1
+ }
+ r0, _, _ := syscall.SyscallN(procNotifyRouteChange2.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1687,7 +1726,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext
if initialNotification {
_p0 = 1
}
- r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0)
+ r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1695,7 +1734,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext
}
func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procAddDllDirectory.Addr(), uintptr(unsafe.Pointer(path)))
cookie = uintptr(r0)
if cookie == 0 {
err = errnoErr(e1)
@@ -1704,7 +1743,7 @@ func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
}
func AssignProcessToJobObject(job Handle, process Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0)
+ r1, _, e1 := syscall.SyscallN(procAssignProcessToJobObject.Addr(), uintptr(job), uintptr(process))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1712,7 +1751,7 @@ func AssignProcessToJobObject(job Handle, process Handle) (err error) {
}
func CancelIo(s Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCancelIo.Addr(), uintptr(s))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1720,7 +1759,7 @@ func CancelIo(s Handle) (err error) {
}
func CancelIoEx(s Handle, o *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0)
+ r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(s), uintptr(unsafe.Pointer(o)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1728,7 +1767,7 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) {
}
func ClearCommBreak(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procClearCommBreak.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1736,7 +1775,7 @@ func ClearCommBreak(handle Handle) (err error) {
}
func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) {
- r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat)))
+ r1, _, e1 := syscall.SyscallN(procClearCommError.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1744,7 +1783,7 @@ func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error
}
func CloseHandle(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCloseHandle.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1752,12 +1791,12 @@ func CloseHandle(handle Handle) (err error) {
}
func ClosePseudoConsole(console Handle) {
- syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0)
+ syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(console))
return
}
func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1765,7 +1804,7 @@ func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) {
}
func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) {
- r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0)
+ r1, _, e1 := syscall.SyscallN(procCreateDirectoryW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1773,7 +1812,7 @@ func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) {
}
func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateEventExW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1782,7 +1821,7 @@ func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, d
}
func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateEventW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1791,7 +1830,7 @@ func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialStat
}
func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procCreateFileMappingW.Addr(), uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1800,7 +1839,7 @@ func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxS
}
func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -1809,7 +1848,7 @@ func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes
}
func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procCreateHardLinkW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -1817,7 +1856,7 @@ func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr
}
func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1826,7 +1865,7 @@ func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, thr
}
func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0)
+ r0, _, e1 := syscall.SyscallN(procCreateJobObjectW.Addr(), uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1835,7 +1874,7 @@ func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle,
}
func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateMutexExW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1848,7 +1887,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16
if initialOwner {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procCreateMutexW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1857,7 +1896,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16
}
func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
+ r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -1866,7 +1905,7 @@ func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances u
}
func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCreatePipe.Addr(), uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1878,7 +1917,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA
if inheritHandles {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCreateProcessW.Addr(), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1886,7 +1925,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA
}
func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) {
- r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0)
+ r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)))
if r0 != 0 {
hr = syscall.Errno(r0)
}
@@ -1894,7 +1933,7 @@ func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pcons
}
func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags))
+ r1, _, e1 := syscall.SyscallN(procCreateSymbolicLinkW.Addr(), uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -1902,7 +1941,7 @@ func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags u
}
func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0)
+ r0, _, e1 := syscall.SyscallN(procCreateToolhelp32Snapshot.Addr(), uintptr(flags), uintptr(processId))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -1911,7 +1950,7 @@ func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, er
}
func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
+ r1, _, e1 := syscall.SyscallN(procDefineDosDeviceW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1919,7 +1958,7 @@ func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err
}
func DeleteFile(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeleteFileW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1927,12 +1966,12 @@ func DeleteFile(path *uint16) (err error) {
}
func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) {
- syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0)
+ syscall.SyscallN(procDeleteProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)))
return
}
func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeleteVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1940,7 +1979,7 @@ func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) {
}
func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procDeviceIoControl.Addr(), uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1948,7 +1987,7 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff
}
func DisconnectNamedPipe(pipe Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1960,7 +1999,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP
if bInheritHandle {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDuplicateHandle.Addr(), uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1968,7 +2007,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP
}
func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0)
+ r1, _, e1 := syscall.SyscallN(procEscapeCommFunction.Addr(), uintptr(handle), uintptr(dwFunc))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1976,12 +2015,12 @@ func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) {
}
func ExitProcess(exitcode uint32) {
- syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0)
+ syscall.SyscallN(procExitProcess.Addr(), uintptr(exitcode))
return
}
func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+ r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -1990,7 +2029,7 @@ func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32,
}
func FindClose(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindClose.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1998,7 +2037,7 @@ func FindClose(handle Handle) (err error) {
}
func FindCloseChangeNotification(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindCloseChangeNotification.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2019,7 +2058,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter
if watchSubtree {
_p1 = 1
}
- r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter))
+ r0, _, e1 := syscall.SyscallN(procFindFirstChangeNotificationW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2028,7 +2067,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter
}
func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0)
+ r0, _, e1 := syscall.SyscallN(procFindFirstFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2037,7 +2076,7 @@ func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err erro
}
func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
+ r0, _, e1 := syscall.SyscallN(procFindFirstVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2046,7 +2085,7 @@ func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, b
}
func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0)
+ r0, _, e1 := syscall.SyscallN(procFindFirstVolumeW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2055,7 +2094,7 @@ func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, er
}
func FindNextChangeNotification(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindNextChangeNotification.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2063,7 +2102,7 @@ func FindNextChangeNotification(handle Handle) (err error) {
}
func findNextFile1(handle Handle, data *win32finddata1) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0)
+ r1, _, e1 := syscall.SyscallN(procFindNextFileW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2071,7 +2110,7 @@ func findNextFile1(handle Handle, data *win32finddata1) (err error) {
}
func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
+ r1, _, e1 := syscall.SyscallN(procFindNextVolumeMountPointW.Addr(), uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2079,7 +2118,7 @@ func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uin
}
func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
+ r1, _, e1 := syscall.SyscallN(procFindNextVolumeW.Addr(), uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2087,7 +2126,7 @@ func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32)
}
func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType))
+ r0, _, e1 := syscall.SyscallN(procFindResourceW.Addr(), uintptr(module), uintptr(name), uintptr(resType))
resInfo = Handle(r0)
if resInfo == 0 {
err = errnoErr(e1)
@@ -2096,7 +2135,7 @@ func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle,
}
func FindVolumeClose(findVolume Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindVolumeClose.Addr(), uintptr(findVolume))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2104,7 +2143,15 @@ func FindVolumeClose(findVolume Handle) (err error) {
}
func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindVolumeMountPointClose.Addr(), uintptr(findVolumeMountPoint))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func FlushConsoleInputBuffer(console Handle) (err error) {
+ r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2112,7 +2159,7 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
}
func FlushFileBuffers(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2120,7 +2167,7 @@ func FlushFileBuffers(handle Handle) (err error) {
}
func FlushViewOfFile(addr uintptr, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0)
+ r1, _, e1 := syscall.SyscallN(procFlushViewOfFile.Addr(), uintptr(addr), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2132,7 +2179,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu
if len(buf) > 0 {
_p0 = &buf[0]
}
- r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procFormatMessageW.Addr(), uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2141,7 +2188,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu
}
func FreeEnvironmentStrings(envs *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFreeEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(envs)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2149,7 +2196,7 @@ func FreeEnvironmentStrings(envs *uint16) (err error) {
}
func FreeLibrary(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFreeLibrary.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2157,7 +2204,7 @@ func FreeLibrary(handle Handle) (err error) {
}
func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0)
+ r1, _, e1 := syscall.SyscallN(procGenerateConsoleCtrlEvent.Addr(), uintptr(ctrlEvent), uintptr(processGroupID))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2165,19 +2212,19 @@ func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err erro
}
func GetACP() (acp uint32) {
- r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetACP.Addr())
acp = uint32(r0)
return
}
func GetActiveProcessorCount(groupNumber uint16) (ret uint32) {
- r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber))
ret = uint32(r0)
return
}
func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetCommModemStatus.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2185,7 +2232,7 @@ func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) {
}
func GetCommState(handle Handle, lpDCB *DCB) (err error) {
- r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2193,7 +2240,7 @@ func GetCommState(handle Handle, lpDCB *DCB) (err error) {
}
func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
- r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2201,13 +2248,13 @@ func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
}
func GetCommandLine() (cmd *uint16) {
- r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetCommandLineW.Addr())
cmd = (*uint16)(unsafe.Pointer(r0))
return
}
func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
+ r1, _, e1 := syscall.SyscallN(procGetComputerNameExW.Addr(), uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2215,7 +2262,7 @@ func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) {
}
func GetComputerName(buf *uint16, n *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetComputerNameW.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2223,7 +2270,7 @@ func GetComputerName(buf *uint16, n *uint32) (err error) {
}
func GetConsoleCP() (cp uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetConsoleCP.Addr())
cp = uint32(r0)
if cp == 0 {
err = errnoErr(e1)
@@ -2232,7 +2279,7 @@ func GetConsoleCP() (cp uint32, err error) {
}
func GetConsoleMode(console Handle, mode *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetConsoleMode.Addr(), uintptr(console), uintptr(unsafe.Pointer(mode)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2240,7 +2287,7 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) {
}
func GetConsoleOutputCP() (cp uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetConsoleOutputCP.Addr())
cp = uint32(r0)
if cp == 0 {
err = errnoErr(e1)
@@ -2249,7 +2296,7 @@ func GetConsoleOutputCP() (cp uint32, err error) {
}
func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetConsoleScreenBufferInfo.Addr(), uintptr(console), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2257,7 +2304,7 @@ func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (
}
func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetCurrentDirectoryW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2266,19 +2313,19 @@ func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) {
}
func GetCurrentProcessId() (pid uint32) {
- r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr())
pid = uint32(r0)
return
}
func GetCurrentThreadId() (id uint32) {
- r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetCurrentThreadId.Addr())
id = uint32(r0)
return
}
func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2286,13 +2333,13 @@ func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint6
}
func GetDriveType(rootPathName *uint16) (driveType uint32) {
- r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetDriveTypeW.Addr(), uintptr(unsafe.Pointer(rootPathName)))
driveType = uint32(r0)
return
}
func GetEnvironmentStrings() (envs *uint16, err error) {
- r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetEnvironmentStringsW.Addr())
envs = (*uint16)(unsafe.Pointer(r0))
if envs == nil {
err = errnoErr(e1)
@@ -2301,7 +2348,7 @@ func GetEnvironmentStrings() (envs *uint16, err error) {
}
func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size))
+ r0, _, e1 := syscall.SyscallN(procGetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2310,7 +2357,7 @@ func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32
}
func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetExitCodeProcess.Addr(), uintptr(handle), uintptr(unsafe.Pointer(exitcode)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2318,7 +2365,7 @@ func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) {
}
func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) {
- r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info)))
+ r1, _, e1 := syscall.SyscallN(procGetFileAttributesExW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2326,7 +2373,7 @@ func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) {
}
func GetFileAttributes(name *uint16) (attrs uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)))
attrs = uint32(r0)
if attrs == INVALID_FILE_ATTRIBUTES {
err = errnoErr(e1)
@@ -2335,7 +2382,7 @@ func GetFileAttributes(name *uint16) (attrs uint32, err error) {
}
func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) {
- r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandle.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2343,7 +2390,7 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e
}
func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandleEx.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2351,7 +2398,7 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte,
}
func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2359,7 +2406,7 @@ func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim
}
func GetFileType(filehandle Handle) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFileType.Addr(), uintptr(filehandle))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2368,7 +2415,7 @@ func GetFileType(filehandle Handle) (n uint32, err error) {
}
func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFinalPathNameByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2377,7 +2424,7 @@ func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32
}
func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFullPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2386,13 +2433,13 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (
}
func GetLargePageMinimum() (size uintptr) {
- r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetLargePageMinimum.Addr())
size = uintptr(r0)
return
}
func GetLastError() (lasterr error) {
- r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetLastError.Addr())
if r0 != 0 {
lasterr = syscall.Errno(r0)
}
@@ -2400,7 +2447,7 @@ func GetLastError() (lasterr error) {
}
func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetLogicalDriveStringsW.Addr(), uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2409,7 +2456,7 @@ func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err
}
func GetLogicalDrives() (drivesBitMask uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetLogicalDrives.Addr())
drivesBitMask = uint32(r0)
if drivesBitMask == 0 {
err = errnoErr(e1)
@@ -2418,7 +2465,7 @@ func GetLogicalDrives() (drivesBitMask uint32, err error) {
}
func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen))
+ r0, _, e1 := syscall.SyscallN(procGetLongPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2427,13 +2474,13 @@ func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err er
}
func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) {
- r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetMaximumProcessorCount.Addr(), uintptr(groupNumber))
ret = uint32(r0)
return
}
func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
+ r0, _, e1 := syscall.SyscallN(procGetModuleFileNameW.Addr(), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2442,7 +2489,7 @@ func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32,
}
func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module)))
+ r1, _, e1 := syscall.SyscallN(procGetModuleHandleExW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2450,7 +2497,7 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er
}
func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeClientProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2458,7 +2505,7 @@ func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err erro
}
func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2466,7 +2513,7 @@ func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, m
}
func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2474,7 +2521,15 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3
}
func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeServerProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2486,7 +2541,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa
if wait {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetOverlappedResult.Addr(), uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2494,7 +2549,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa
}
func GetPriorityClass(process Handle) (ret uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetPriorityClass.Addr(), uintptr(process))
ret = uint32(r0)
if ret == 0 {
err = errnoErr(e1)
@@ -2512,7 +2567,7 @@ func GetProcAddress(module Handle, procname string) (proc uintptr, err error) {
}
func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetProcAddress.Addr(), uintptr(module), uintptr(unsafe.Pointer(procname)))
proc = uintptr(r0)
if proc == 0 {
err = errnoErr(e1)
@@ -2521,7 +2576,7 @@ func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) {
}
func GetProcessId(process Handle) (id uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetProcessId.Addr(), uintptr(process))
id = uint32(r0)
if id == 0 {
err = errnoErr(e1)
@@ -2530,7 +2585,7 @@ func GetProcessId(process Handle) (id uint32, err error) {
}
func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetProcessPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2538,7 +2593,7 @@ func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uin
}
func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetProcessShutdownParameters.Addr(), uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2546,7 +2601,7 @@ func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) {
}
func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetProcessTimes.Addr(), uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2554,12 +2609,12 @@ func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime,
}
func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) {
- syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0)
+ syscall.SyscallN(procGetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)))
return
}
func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0)
+ r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2567,7 +2622,7 @@ func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overl
}
func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen))
+ r0, _, e1 := syscall.SyscallN(procGetShortPathNameW.Addr(), uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2576,12 +2631,12 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin
}
func getStartupInfo(startupInfo *StartupInfo) {
- syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0)
+ syscall.SyscallN(procGetStartupInfoW.Addr(), uintptr(unsafe.Pointer(startupInfo)))
return
}
func GetStdHandle(stdhandle uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetStdHandle.Addr(), uintptr(stdhandle))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2590,7 +2645,7 @@ func GetStdHandle(stdhandle uint32) (handle Handle, err error) {
}
func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+ r0, _, e1 := syscall.SyscallN(procGetSystemDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
len = uint32(r0)
if len == 0 {
err = errnoErr(e1)
@@ -2599,7 +2654,7 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
}
func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetSystemPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2607,17 +2662,17 @@ func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint
}
func GetSystemTimeAsFileTime(time *Filetime) {
- syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0)
+ syscall.SyscallN(procGetSystemTimeAsFileTime.Addr(), uintptr(unsafe.Pointer(time)))
return
}
func GetSystemTimePreciseAsFileTime(time *Filetime) {
- syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0)
+ syscall.SyscallN(procGetSystemTimePreciseAsFileTime.Addr(), uintptr(unsafe.Pointer(time)))
return
}
func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+ r0, _, e1 := syscall.SyscallN(procGetSystemWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
len = uint32(r0)
if len == 0 {
err = errnoErr(e1)
@@ -2626,7 +2681,7 @@ func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err erro
}
func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetTempPathW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2635,7 +2690,7 @@ func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) {
}
func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetThreadPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2643,13 +2698,13 @@ func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint
}
func getTickCount64() (ms uint64) {
- r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr())
ms = uint64(r0)
return
}
func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetTimeZoneInformation.Addr(), uintptr(unsafe.Pointer(tzi)))
rc = uint32(r0)
if rc == 0xffffffff {
err = errnoErr(e1)
@@ -2658,7 +2713,7 @@ func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) {
}
func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetUserPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2666,7 +2721,7 @@ func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16
}
func GetVersion() (ver uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetVersion.Addr())
ver = uint32(r0)
if ver == 0 {
err = errnoErr(e1)
@@ -2675,7 +2730,7 @@ func GetVersion() (ver uint32, err error) {
}
func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0)
+ r1, _, e1 := syscall.SyscallN(procGetVolumeInformationByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2683,7 +2738,7 @@ func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeN
}
func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0)
+ r1, _, e1 := syscall.SyscallN(procGetVolumeInformationW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2691,7 +2746,7 @@ func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volume
}
func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength))
+ r1, _, e1 := syscall.SyscallN(procGetVolumeNameForVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2699,7 +2754,7 @@ func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint
}
func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength))
+ r1, _, e1 := syscall.SyscallN(procGetVolumePathNameW.Addr(), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2707,7 +2762,7 @@ func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength ui
}
func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetVolumePathNamesForVolumeNameW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2715,7 +2770,7 @@ func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16
}
func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+ r0, _, e1 := syscall.SyscallN(procGetWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
len = uint32(r0)
if len == 0 {
err = errnoErr(e1)
@@ -2724,7 +2779,7 @@ func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
}
func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procInitializeProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2736,7 +2791,7 @@ func IsWow64Process(handle Handle, isWow64 *bool) (err error) {
if *isWow64 {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0)
+ r1, _, e1 := syscall.SyscallN(procIsWow64Process.Addr(), uintptr(handle), uintptr(unsafe.Pointer(&_p0)))
*isWow64 = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -2749,7 +2804,7 @@ func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint1
if err != nil {
return
}
- r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine)))
+ r1, _, e1 := syscall.SyscallN(procIsWow64Process2.Addr(), uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2766,7 +2821,7 @@ func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, e
}
func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags))
+ r0, _, e1 := syscall.SyscallN(procLoadLibraryExW.Addr(), uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2784,7 +2839,7 @@ func LoadLibrary(libname string) (handle Handle, err error) {
}
func _LoadLibrary(libname *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procLoadLibraryW.Addr(), uintptr(unsafe.Pointer(libname)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2793,7 +2848,7 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) {
}
func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0)
+ r0, _, e1 := syscall.SyscallN(procLoadResource.Addr(), uintptr(module), uintptr(resInfo))
resData = Handle(r0)
if resData == 0 {
err = errnoErr(e1)
@@ -2802,7 +2857,7 @@ func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) {
}
func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0)
+ r0, _, e1 := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(length))
ptr = uintptr(r0)
if ptr == 0 {
err = errnoErr(e1)
@@ -2811,7 +2866,7 @@ func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) {
}
func LocalFree(hmem Handle) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procLocalFree.Addr(), uintptr(hmem))
handle = Handle(r0)
if handle != 0 {
err = errnoErr(e1)
@@ -2820,7 +2875,7 @@ func LocalFree(hmem Handle) (handle Handle, err error) {
}
func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
+ r1, _, e1 := syscall.SyscallN(procLockFileEx.Addr(), uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2828,7 +2883,7 @@ func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, byt
}
func LockResource(resData Handle) (addr uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procLockResource.Addr(), uintptr(resData))
addr = uintptr(r0)
if addr == 0 {
err = errnoErr(e1)
@@ -2837,7 +2892,7 @@ func LockResource(resData Handle) (addr uintptr, err error) {
}
func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) {
- r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0)
+ r0, _, e1 := syscall.SyscallN(procMapViewOfFile.Addr(), uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length))
addr = uintptr(r0)
if addr == 0 {
err = errnoErr(e1)
@@ -2846,7 +2901,7 @@ func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow ui
}
func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procModule32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2854,7 +2909,7 @@ func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
}
func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procModule32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2862,7 +2917,7 @@ func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
}
func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+ r1, _, e1 := syscall.SyscallN(procMoveFileExW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2870,7 +2925,7 @@ func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) {
}
func MoveFile(from *uint16, to *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0)
+ r1, _, e1 := syscall.SyscallN(procMoveFileW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2878,7 +2933,7 @@ func MoveFile(from *uint16, to *uint16) (err error) {
}
func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) {
- r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
+ r0, _, e1 := syscall.SyscallN(procMultiByteToWideChar.Addr(), uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
nwrite = int32(r0)
if nwrite == 0 {
err = errnoErr(e1)
@@ -2891,7 +2946,7 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procOpenEventW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2904,7 +2959,7 @@ func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procOpenMutexW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2917,7 +2972,7 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId))
+ r0, _, e1 := syscall.SyscallN(procOpenProcess.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(processId))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2930,7 +2985,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId))
+ r0, _, e1 := syscall.SyscallN(procOpenThread.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(threadId))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2939,7 +2994,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand
}
func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procPostQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2947,7 +3002,7 @@ func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overla
}
func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procProcess32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2955,7 +3010,7 @@ func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) {
}
func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procProcess32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2963,7 +3018,7 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) {
}
func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0)
+ r1, _, e1 := syscall.SyscallN(procProcessIdToSessionId.Addr(), uintptr(pid), uintptr(unsafe.Pointer(sessionid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2971,7 +3026,7 @@ func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) {
}
func PulseEvent(event Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procPulseEvent.Addr(), uintptr(event))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2979,7 +3034,7 @@ func PulseEvent(event Handle) (err error) {
}
func PurgeComm(handle Handle, dwFlags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0)
+ r1, _, e1 := syscall.SyscallN(procPurgeComm.Addr(), uintptr(handle), uintptr(dwFlags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2987,7 +3042,7 @@ func PurgeComm(handle Handle, dwFlags uint32) (err error) {
}
func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max))
+ r0, _, e1 := syscall.SyscallN(procQueryDosDeviceW.Addr(), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2996,7 +3051,7 @@ func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint3
}
func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procQueryFullProcessImageNameW.Addr(), uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3004,7 +3059,7 @@ func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size
}
func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3012,7 +3067,7 @@ func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobO
}
func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) {
- r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0)
+ r1, _, e1 := syscall.SyscallN(procReadConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3024,7 +3079,7 @@ func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree
if watchSubTree {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0)
+ r1, _, e1 := syscall.SyscallN(procReadDirectoryChangesW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3036,7 +3091,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (
if len(buf) > 0 {
_p0 = &buf[0]
}
- r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procReadFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3044,7 +3099,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (
}
func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0)
+ r1, _, e1 := syscall.SyscallN(procReadProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3052,7 +3107,7 @@ func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size u
}
func ReleaseMutex(mutex Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procReleaseMutex.Addr(), uintptr(mutex))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3060,7 +3115,7 @@ func ReleaseMutex(mutex Handle) (err error) {
}
func RemoveDirectory(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procRemoveDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3068,7 +3123,7 @@ func RemoveDirectory(path *uint16) (err error) {
}
func RemoveDllDirectory(cookie uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procRemoveDllDirectory.Addr(), uintptr(cookie))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3076,7 +3131,7 @@ func RemoveDllDirectory(cookie uintptr) (err error) {
}
func ResetEvent(event Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procResetEvent.Addr(), uintptr(event))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3084,7 +3139,7 @@ func ResetEvent(event Handle) (err error) {
}
func resizePseudoConsole(pconsole Handle, size uint32) (hr error) {
- r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0)
+ r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(pconsole), uintptr(size))
if r0 != 0 {
hr = syscall.Errno(r0)
}
@@ -3092,7 +3147,7 @@ func resizePseudoConsole(pconsole Handle, size uint32) (hr error) {
}
func ResumeThread(thread Handle) (ret uint32, err error) {
- r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procResumeThread.Addr(), uintptr(thread))
ret = uint32(r0)
if ret == 0xffffffff {
err = errnoErr(e1)
@@ -3101,7 +3156,7 @@ func ResumeThread(thread Handle) (ret uint32, err error) {
}
func SetCommBreak(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommBreak.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3109,7 +3164,7 @@ func SetCommBreak(handle Handle) (err error) {
}
func SetCommMask(handle Handle, dwEvtMask uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommMask.Addr(), uintptr(handle), uintptr(dwEvtMask))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3117,7 +3172,7 @@ func SetCommMask(handle Handle, dwEvtMask uint32) (err error) {
}
func SetCommState(handle Handle, lpDCB *DCB) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3125,7 +3180,7 @@ func SetCommState(handle Handle, lpDCB *DCB) (err error) {
}
func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3133,7 +3188,7 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
}
func SetConsoleCP(cp uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleCP.Addr(), uintptr(cp))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3141,7 +3196,7 @@ func SetConsoleCP(cp uint32) (err error) {
}
func setConsoleCursorPosition(console Handle, position uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleCursorPosition.Addr(), uintptr(console), uintptr(position))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3149,7 +3204,7 @@ func setConsoleCursorPosition(console Handle, position uint32) (err error) {
}
func SetConsoleMode(console Handle, mode uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleMode.Addr(), uintptr(console), uintptr(mode))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3157,7 +3212,7 @@ func SetConsoleMode(console Handle, mode uint32) (err error) {
}
func SetConsoleOutputCP(cp uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleOutputCP.Addr(), uintptr(cp))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3165,7 +3220,7 @@ func SetConsoleOutputCP(cp uint32) (err error) {
}
func SetCurrentDirectory(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetCurrentDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3173,7 +3228,7 @@ func SetCurrentDirectory(path *uint16) (err error) {
}
func SetDefaultDllDirectories(directoryFlags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetDefaultDllDirectories.Addr(), uintptr(directoryFlags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3190,7 +3245,7 @@ func SetDllDirectory(path string) (err error) {
}
func _SetDllDirectory(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetDllDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3198,7 +3253,7 @@ func _SetDllDirectory(path *uint16) (err error) {
}
func SetEndOfFile(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetEndOfFile.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3206,7 +3261,7 @@ func SetEndOfFile(handle Handle) (err error) {
}
func SetEnvironmentVariable(name *uint16, value *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3214,13 +3269,13 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) {
}
func SetErrorMode(mode uint32) (ret uint32) {
- r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetErrorMode.Addr(), uintptr(mode))
ret = uint32(r0)
return
}
func SetEvent(event Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3228,7 +3283,7 @@ func SetEvent(event Handle) (err error) {
}
func SetFileAttributes(name *uint16, attrs uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(attrs))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3236,7 +3291,7 @@ func SetFileAttributes(name *uint16, attrs uint32) (err error) {
}
func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(handle), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3244,7 +3299,7 @@ func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error)
}
func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileInformationByHandle.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3252,7 +3307,7 @@ func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inB
}
func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) {
- r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetFilePointer.Addr(), uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence))
newlowoffset = uint32(r0)
if newlowoffset == 0xffffffff {
err = errnoErr(e1)
@@ -3261,7 +3316,7 @@ func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence
}
func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3269,7 +3324,7 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim
}
func SetFileValidData(handle Handle, validDataLength int64) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileValidData.Addr(), uintptr(handle), uintptr(validDataLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3277,7 +3332,7 @@ func SetFileValidData(handle Handle, validDataLength int64) (err error) {
}
func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags))
+ r1, _, e1 := syscall.SyscallN(procSetHandleInformation.Addr(), uintptr(handle), uintptr(mask), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3285,7 +3340,7 @@ func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error)
}
func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) {
- r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength))
ret = int(r0)
if ret == 0 {
err = errnoErr(e1)
@@ -3294,7 +3349,7 @@ func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobOb
}
func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetNamedPipeHandleState.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3302,7 +3357,7 @@ func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uin
}
func SetPriorityClass(process Handle, priorityClass uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0)
+ r1, _, e1 := syscall.SyscallN(procSetPriorityClass.Addr(), uintptr(process), uintptr(priorityClass))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3314,7 +3369,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) {
if disable {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0)
+ r1, _, e1 := syscall.SyscallN(procSetProcessPriorityBoost.Addr(), uintptr(process), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3322,7 +3377,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) {
}
func SetProcessShutdownParameters(level uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procSetProcessShutdownParameters.Addr(), uintptr(level), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3330,7 +3385,7 @@ func SetProcessShutdownParameters(level uint32, flags uint32) (err error) {
}
func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3338,7 +3393,7 @@ func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr
}
func SetStdHandle(stdhandle uint32, handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
+ r1, _, e1 := syscall.SyscallN(procSetStdHandle.Addr(), uintptr(stdhandle), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3346,7 +3401,7 @@ func SetStdHandle(stdhandle uint32, handle Handle) (err error) {
}
func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetVolumeLabelW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3354,7 +3409,7 @@ func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) {
}
func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3362,7 +3417,7 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro
}
func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue))
+ r1, _, e1 := syscall.SyscallN(procSetupComm.Addr(), uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3370,7 +3425,7 @@ func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) {
}
func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) {
- r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0)
+ r0, _, e1 := syscall.SyscallN(procSizeofResource.Addr(), uintptr(module), uintptr(resInfo))
size = uint32(r0)
if size == 0 {
err = errnoErr(e1)
@@ -3383,13 +3438,13 @@ func SleepEx(milliseconds uint32, alertable bool) (ret uint32) {
if alertable {
_p0 = 1
}
- r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0)
+ r0, _, _ := syscall.SyscallN(procSleepEx.Addr(), uintptr(milliseconds), uintptr(_p0))
ret = uint32(r0)
return
}
func TerminateJobObject(job Handle, exitCode uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0)
+ r1, _, e1 := syscall.SyscallN(procTerminateJobObject.Addr(), uintptr(job), uintptr(exitCode))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3397,7 +3452,7 @@ func TerminateJobObject(job Handle, exitCode uint32) (err error) {
}
func TerminateProcess(handle Handle, exitcode uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0)
+ r1, _, e1 := syscall.SyscallN(procTerminateProcess.Addr(), uintptr(handle), uintptr(exitcode))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3405,7 +3460,7 @@ func TerminateProcess(handle Handle, exitcode uint32) (err error) {
}
func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procThread32First.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3413,7 +3468,7 @@ func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
}
func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procThread32Next.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3421,7 +3476,7 @@ func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
}
func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procUnlockFileEx.Addr(), uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3429,7 +3484,7 @@ func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint3
}
func UnmapViewOfFile(addr uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procUnmapViewOfFile.Addr(), uintptr(addr))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3437,7 +3492,7 @@ func UnmapViewOfFile(addr uintptr) (err error) {
}
func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procUpdateProcThreadAttribute.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3445,7 +3500,7 @@ func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32,
}
func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) {
- r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procVirtualAlloc.Addr(), uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect))
value = uintptr(r0)
if value == 0 {
err = errnoErr(e1)
@@ -3454,7 +3509,7 @@ func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint3
}
func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype))
+ r1, _, e1 := syscall.SyscallN(procVirtualFree.Addr(), uintptr(address), uintptr(size), uintptr(freetype))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3462,7 +3517,7 @@ func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) {
}
func VirtualLock(addr uintptr, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualLock.Addr(), uintptr(addr), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3470,7 +3525,7 @@ func VirtualLock(addr uintptr, length uintptr) (err error) {
}
func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualProtect.Addr(), uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3478,7 +3533,7 @@ func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect
}
func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualProtectEx.Addr(), uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3486,7 +3541,7 @@ func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect
}
func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
+ r1, _, e1 := syscall.SyscallN(procVirtualQuery.Addr(), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3494,7 +3549,7 @@ func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintpt
}
func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualQueryEx.Addr(), uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3502,7 +3557,7 @@ func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformat
}
func VirtualUnlock(addr uintptr, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualUnlock.Addr(), uintptr(addr), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3510,13 +3565,13 @@ func VirtualUnlock(addr uintptr, length uintptr) (err error) {
}
func WTSGetActiveConsoleSessionId() (sessionID uint32) {
- r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procWTSGetActiveConsoleSessionId.Addr())
sessionID = uint32(r0)
return
}
func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped)))
+ r1, _, e1 := syscall.SyscallN(procWaitCommEvent.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3528,7 +3583,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil
if waitAll {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procWaitForMultipleObjects.Addr(), uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds))
event = uint32(r0)
if event == 0xffffffff {
err = errnoErr(e1)
@@ -3537,7 +3592,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil
}
func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) {
- r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0)
+ r0, _, e1 := syscall.SyscallN(procWaitForSingleObject.Addr(), uintptr(handle), uintptr(waitMilliseconds))
event = uint32(r0)
if event == 0xffffffff {
err = errnoErr(e1)
@@ -3546,7 +3601,7 @@ func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32,
}
func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) {
- r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0)
+ r1, _, e1 := syscall.SyscallN(procWriteConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3558,7 +3613,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped)
if len(buf) > 0 {
_p0 = &buf[0]
}
- r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procWriteFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3566,7 +3621,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped)
}
func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0)
+ r1, _, e1 := syscall.SyscallN(procWriteProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3574,7 +3629,7 @@ func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size
}
func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procAcceptEx.Addr(), uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3582,12 +3637,12 @@ func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32
}
func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) {
- syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0)
+ syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)))
return
}
func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procTransmitFile.Addr(), uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3595,7 +3650,7 @@ func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint
}
func NetApiBufferFree(buf *byte) (neterr error) {
- r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNetApiBufferFree.Addr(), uintptr(unsafe.Pointer(buf)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3603,7 +3658,7 @@ func NetApiBufferFree(buf *byte) (neterr error) {
}
func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) {
- r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
+ r0, _, _ := syscall.SyscallN(procNetGetJoinInformation.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3611,7 +3666,7 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete
}
func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) {
- r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0)
+ r0, _, _ := syscall.SyscallN(procNetUserEnum.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3619,7 +3674,7 @@ func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, pr
}
func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) {
- r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNetUserGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3627,7 +3682,7 @@ func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **by
}
func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0)
+ r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3635,7 +3690,7 @@ func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO
}
func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) {
- r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
+ r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3643,7 +3698,7 @@ func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, i
}
func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0)
+ r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3651,7 +3706,7 @@ func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe
}
func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3659,7 +3714,7 @@ func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInf
}
func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0)
+ r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3667,7 +3722,7 @@ func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte,
}
func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNtSetInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3675,7 +3730,7 @@ func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.P
}
func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
+ r0, _, _ := syscall.SyscallN(procNtSetSystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3683,13 +3738,13 @@ func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoL
}
func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) {
- r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
+ r0, _, _ := syscall.SyscallN(procRtlAddFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
ret = r0 != 0
return
}
func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
- r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(acl)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3697,13 +3752,13 @@ func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
}
func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) {
- r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDeleteFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)))
ret = r0 != 0
return
}
func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3711,7 +3766,7 @@ func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFile
}
func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3719,18 +3774,18 @@ func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString
}
func RtlGetCurrentPeb() (peb *PEB) {
- r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlGetCurrentPeb.Addr())
peb = (*PEB)(unsafe.Pointer(r0))
return
}
func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) {
- syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
+ syscall.SyscallN(procRtlGetNtVersionNumbers.Addr(), uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
return
}
func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
- r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlGetVersion.Addr(), uintptr(unsafe.Pointer(info)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3738,23 +3793,23 @@ func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
}
func RtlInitString(destinationString *NTString, sourceString *byte) {
- syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+ syscall.SyscallN(procRtlInitString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
return
}
func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) {
- syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+ syscall.SyscallN(procRtlInitUnicodeString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
return
}
func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) {
- r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(ntstatus))
ret = syscall.Errno(r0)
return
}
func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
- r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0)
+ r0, _, _ := syscall.SyscallN(procCLSIDFromString.Addr(), uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3762,7 +3817,7 @@ func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
}
func coCreateGuid(pguid *GUID) (ret error) {
- r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCoCreateGuid.Addr(), uintptr(unsafe.Pointer(pguid)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3770,7 +3825,7 @@ func coCreateGuid(pguid *GUID) (ret error) {
}
func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) {
- r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCoGetObject.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3778,7 +3833,7 @@ func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable *
}
func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
- r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0)
+ r0, _, _ := syscall.SyscallN(procCoInitializeEx.Addr(), uintptr(reserved), uintptr(coInit))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3786,23 +3841,23 @@ func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
}
func CoTaskMemFree(address unsafe.Pointer) {
- syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0)
+ syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(address))
return
}
func CoUninitialize() {
- syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0)
+ syscall.SyscallN(procCoUninitialize.Addr())
return
}
func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) {
- r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
+ r0, _, _ := syscall.SyscallN(procStringFromGUID2.Addr(), uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
chars = int32(r0)
return
}
func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procEnumProcessModules.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3810,7 +3865,7 @@ func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uin
}
func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0)
+ r1, _, e1 := syscall.SyscallN(procEnumProcessModulesEx.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3818,7 +3873,7 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u
}
func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
+ r1, _, e1 := syscall.SyscallN(procEnumProcesses.Addr(), uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3826,7 +3881,7 @@ func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err
}
func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetModuleBaseNameW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3834,7 +3889,7 @@ func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uin
}
func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetModuleFileNameExW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3842,7 +3897,7 @@ func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size u
}
func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetModuleInformation.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3850,7 +3905,7 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb
}
func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb))
+ r1, _, e1 := syscall.SyscallN(procQueryWorkingSetEx.Addr(), uintptr(process), uintptr(pv), uintptr(cb))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3862,7 +3917,7 @@ func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callb
if ret != nil {
return
}
- r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0)
+ r0, _, _ := syscall.SyscallN(procSubscribeServiceChangeNotifications.Addr(), uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3874,12 +3929,12 @@ func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) {
if err != nil {
return
}
- syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0)
+ syscall.SyscallN(procUnsubscribeServiceChangeNotifications.Addr(), uintptr(subscription))
return
}
func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
+ r1, _, e1 := syscall.SyscallN(procGetUserNameExW.Addr(), uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -3887,7 +3942,7 @@ func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err er
}
func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procTranslateNameW.Addr(), uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -3895,7 +3950,7 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint
}
func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+ r1, _, e1 := syscall.SyscallN(procSetupDiBuildDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3903,7 +3958,7 @@ func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
}
func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiCallClassInstaller.Addr(), uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3911,7 +3966,7 @@ func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInf
}
func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiCancelDriverInfoSearch.Addr(), uintptr(deviceInfoSet))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3919,7 +3974,7 @@ func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) {
}
func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procSetupDiClassGuidsFromNameExW.Addr(), uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3927,7 +3982,7 @@ func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGu
}
func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procSetupDiClassNameFromGuidExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3935,7 +3990,7 @@ func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSiz
}
func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
- r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoListExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
handle = DevInfo(r0)
if handle == DevInfo(InvalidHandle) {
err = errnoErr(e1)
@@ -3944,7 +3999,7 @@ func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineN
}
func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3952,7 +4007,7 @@ func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUI
}
func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDeviceInfoList.Addr(), uintptr(deviceInfoSet))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3960,7 +4015,7 @@ func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) {
}
func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+ r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3968,7 +4023,7 @@ func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfo
}
func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiEnumDeviceInfo.Addr(), uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3976,7 +4031,7 @@ func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfo
}
func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiEnumDriverInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3984,7 +4039,7 @@ func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, d
}
func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
- r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetupDiGetClassDevsExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
handle = DevInfo(r0)
if handle == DevInfo(InvalidHandle) {
err = errnoErr(e1)
@@ -3993,7 +4048,7 @@ func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintp
}
func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4001,7 +4056,7 @@ func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo
}
func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInfoListDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4009,7 +4064,7 @@ func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailDa
}
func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4017,7 +4072,7 @@ func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf
}
func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstanceIdW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4025,7 +4080,7 @@ func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
}
func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDevicePropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4033,7 +4088,7 @@ func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4041,7 +4096,7 @@ func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev
}
func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDriverInfoDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4049,7 +4104,7 @@ func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
}
func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4057,7 +4112,7 @@ func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4065,7 +4120,7 @@ func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired))
+ r0, _, e1 := syscall.SyscallN(procSetupDiOpenDevRegKey.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired))
key = Handle(r0)
if key == InvalidHandle {
err = errnoErr(e1)
@@ -4074,7 +4129,7 @@ func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Sc
}
func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4082,7 +4137,7 @@ func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo
}
func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4090,7 +4145,7 @@ func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf
}
func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4098,7 +4153,7 @@ func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev
}
func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4106,7 +4161,7 @@ func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4114,7 +4169,7 @@ func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procSetupUninstallOEMInfW.Addr(), uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4122,7 +4177,7 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er
}
func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) {
- r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0)
+ r0, _, e1 := syscall.SyscallN(procCommandLineToArgvW.Addr(), uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)))
argv = (**uint16)(unsafe.Pointer(r0))
if argv == nil {
err = errnoErr(e1)
@@ -4131,7 +4186,7 @@ func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) {
}
func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) {
- r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSHGetKnownFolderPath.Addr(), uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -4139,7 +4194,7 @@ func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **u
}
func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) {
- r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
+ r1, _, e1 := syscall.SyscallN(procShellExecuteW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
if r1 <= 32 {
err = errnoErr(e1)
}
@@ -4147,12 +4202,12 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui
}
func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) {
- syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param))
+ syscall.SyscallN(procEnumChildWindows.Addr(), uintptr(hwnd), uintptr(enumFunc), uintptr(param))
return
}
func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) {
- r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0)
+ r1, _, e1 := syscall.SyscallN(procEnumWindows.Addr(), uintptr(enumFunc), uintptr(param))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4160,7 +4215,7 @@ func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) {
}
func ExitWindowsEx(flags uint32, reason uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0)
+ r1, _, e1 := syscall.SyscallN(procExitWindowsEx.Addr(), uintptr(flags), uintptr(reason))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4168,7 +4223,7 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) {
}
func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) {
- r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount))
+ r0, _, e1 := syscall.SyscallN(procGetClassNameW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount))
copied = int32(r0)
if copied == 0 {
err = errnoErr(e1)
@@ -4177,19 +4232,19 @@ func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, e
}
func GetDesktopWindow() (hwnd HWND) {
- r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr())
hwnd = HWND(r0)
return
}
func GetForegroundWindow() (hwnd HWND) {
- r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetForegroundWindow.Addr())
hwnd = HWND(r0)
return
}
func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetGUIThreadInfo.Addr(), uintptr(thread), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4197,19 +4252,19 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) {
}
func GetKeyboardLayout(tid uint32) (hkl Handle) {
- r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetKeyboardLayout.Addr(), uintptr(tid))
hkl = Handle(r0)
return
}
func GetShellWindow() (shellWindow HWND) {
- r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetShellWindow.Addr())
shellWindow = HWND(r0)
return
}
func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetWindowThreadProcessId.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(pid)))
tid = uint32(r0)
if tid == 0 {
err = errnoErr(e1)
@@ -4218,25 +4273,25 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) {
}
func IsWindow(hwnd HWND) (isWindow bool) {
- r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsWindow.Addr(), uintptr(hwnd))
isWindow = r0 != 0
return
}
func IsWindowUnicode(hwnd HWND) (isUnicode bool) {
- r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsWindowUnicode.Addr(), uintptr(hwnd))
isUnicode = r0 != 0
return
}
func IsWindowVisible(hwnd HWND) (isVisible bool) {
- r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsWindowVisible.Addr(), uintptr(hwnd))
isVisible = r0 != 0
return
}
func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0)
+ r0, _, e1 := syscall.SyscallN(procLoadKeyboardLayoutW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags))
hkl = Handle(r0)
if hkl == 0 {
err = errnoErr(e1)
@@ -4245,7 +4300,7 @@ func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) {
}
func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) {
- r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype))
ret = int32(r0)
if ret == 0 {
err = errnoErr(e1)
@@ -4254,13 +4309,13 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i
}
func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) {
- r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0)
+ r0, _, _ := syscall.SyscallN(procToUnicodeEx.Addr(), uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl))
ret = int32(r0)
return
}
func UnloadKeyboardLayout(hkl Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procUnloadKeyboardLayout.Addr(), uintptr(hkl))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4272,7 +4327,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (
if inheritExisting {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
+ r1, _, e1 := syscall.SyscallN(procCreateEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4280,7 +4335,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (
}
func DestroyEnvironmentBlock(block *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDestroyEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4288,7 +4343,7 @@ func DestroyEnvironmentBlock(block *uint16) (err error) {
}
func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
+ r1, _, e1 := syscall.SyscallN(procGetUserProfileDirectoryW.Addr(), uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4305,7 +4360,7 @@ func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32
}
func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetFileVersionInfoSizeW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)))
bufSize = uint32(r0)
if bufSize == 0 {
err = errnoErr(e1)
@@ -4323,7 +4378,7 @@ func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer u
}
func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileVersionInfoW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4340,7 +4395,7 @@ func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer
}
func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procVerQueryValueW.Addr(), uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4348,7 +4403,7 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint
}
func TimeBeginPeriod(period uint32) (err error) {
- r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0)
+ r1, _, e1 := syscall.SyscallN(proctimeBeginPeriod.Addr(), uintptr(period))
if r1 != 0 {
err = errnoErr(e1)
}
@@ -4356,7 +4411,7 @@ func TimeBeginPeriod(period uint32) (err error) {
}
func TimeEndPeriod(period uint32) (err error) {
- r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0)
+ r1, _, e1 := syscall.SyscallN(proctimeEndPeriod.Addr(), uintptr(period))
if r1 != 0 {
err = errnoErr(e1)
}
@@ -4364,7 +4419,7 @@ func TimeEndPeriod(period uint32) (err error) {
}
func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) {
- r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
+ r0, _, _ := syscall.SyscallN(procWinVerifyTrustEx.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -4372,12 +4427,12 @@ func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error)
}
func FreeAddrInfoW(addrinfo *AddrinfoW) {
- syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0)
+ syscall.SyscallN(procFreeAddrInfoW.Addr(), uintptr(unsafe.Pointer(addrinfo)))
return
}
func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) {
- r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetAddrInfoW.Addr(), uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)))
if r0 != 0 {
sockerr = syscall.Errno(r0)
}
@@ -4385,7 +4440,7 @@ func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, resul
}
func WSACleanup() (err error) {
- r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSACleanup.Addr())
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4393,7 +4448,7 @@ func WSACleanup() (err error) {
}
func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procWSADuplicateSocketW.Addr(), 3, uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info)))
+ r1, _, e1 := syscall.SyscallN(procWSADuplicateSocketW.Addr(), uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info)))
if r1 != 0 {
err = errnoErr(e1)
}
@@ -4401,7 +4456,7 @@ func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err
}
func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) {
- r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
+ r0, _, e1 := syscall.SyscallN(procWSAEnumProtocolsW.Addr(), uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
n = int32(r0)
if n == -1 {
err = errnoErr(e1)
@@ -4414,7 +4469,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f
if wait {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
+ r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4422,7 +4477,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f
}
func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
+ r1, _, e1 := syscall.SyscallN(procWSAIoctl.Addr(), uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4430,7 +4485,7 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo
}
func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle)))
+ r1, _, e1 := syscall.SyscallN(procWSALookupServiceBeginW.Addr(), uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4438,7 +4493,7 @@ func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle)
}
func WSALookupServiceEnd(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSALookupServiceEnd.Addr(), uintptr(handle))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4446,7 +4501,7 @@ func WSALookupServiceEnd(handle Handle) (err error) {
}
func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) {
- r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSALookupServiceNextW.Addr(), uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4454,7 +4509,7 @@ func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WS
}
func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSARecv.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4462,7 +4517,7 @@ func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32
}
func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+ r1, _, e1 := syscall.SyscallN(procWSARecvFrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4470,7 +4525,7 @@ func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *ui
}
func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSASend.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4478,7 +4533,7 @@ func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32,
}
func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+ r1, _, e1 := syscall.SyscallN(procWSASendTo.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4486,7 +4541,7 @@ func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32
}
func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags))
+ r0, _, e1 := syscall.SyscallN(procWSASocketW.Addr(), uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -4495,7 +4550,7 @@ func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo,
}
func WSAStartup(verreq uint32, data *WSAData) (sockerr error) {
- r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0)
+ r0, _, _ := syscall.SyscallN(procWSAStartup.Addr(), uintptr(verreq), uintptr(unsafe.Pointer(data)))
if r0 != 0 {
sockerr = syscall.Errno(r0)
}
@@ -4503,7 +4558,7 @@ func WSAStartup(verreq uint32, data *WSAData) (sockerr error) {
}
func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) {
- r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+ r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4511,7 +4566,7 @@ func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) {
}
func Closesocket(s Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4519,7 +4574,7 @@ func Closesocket(s Handle) (err error) {
}
func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) {
- r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+ r1, _, e1 := syscall.SyscallN(procconnect.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4536,7 +4591,7 @@ func GetHostByName(name string) (h *Hostent, err error) {
}
func _GetHostByName(name *byte) (h *Hostent, err error) {
- r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procgethostbyname.Addr(), uintptr(unsafe.Pointer(name)))
h = (*Hostent)(unsafe.Pointer(r0))
if h == nil {
err = errnoErr(e1)
@@ -4545,7 +4600,7 @@ func _GetHostByName(name *byte) (h *Hostent, err error) {
}
func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
- r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4562,7 +4617,7 @@ func GetProtoByName(name string) (p *Protoent, err error) {
}
func _GetProtoByName(name *byte) (p *Protoent, err error) {
- r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procgetprotobyname.Addr(), uintptr(unsafe.Pointer(name)))
p = (*Protoent)(unsafe.Pointer(r0))
if p == nil {
err = errnoErr(e1)
@@ -4585,7 +4640,7 @@ func GetServByName(name string, proto string) (s *Servent, err error) {
}
func _GetServByName(name *byte, proto *byte) (s *Servent, err error) {
- r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0)
+ r0, _, e1 := syscall.SyscallN(procgetservbyname.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)))
s = (*Servent)(unsafe.Pointer(r0))
if s == nil {
err = errnoErr(e1)
@@ -4594,7 +4649,7 @@ func _GetServByName(name *byte, proto *byte) (s *Servent, err error) {
}
func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
- r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4602,7 +4657,7 @@ func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
}
func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) {
- r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0)
+ r1, _, e1 := syscall.SyscallN(procgetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4610,7 +4665,7 @@ func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int3
}
func listen(s Handle, backlog int32) (err error) {
- r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0)
+ r1, _, e1 := syscall.SyscallN(proclisten.Addr(), uintptr(s), uintptr(backlog))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4618,7 +4673,7 @@ func listen(s Handle, backlog int32) (err error) {
}
func Ntohs(netshort uint16) (u uint16) {
- r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0)
+ r0, _, _ := syscall.SyscallN(procntohs.Addr(), uintptr(netshort))
u = uint16(r0)
return
}
@@ -4628,7 +4683,7 @@ func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *
if len(buf) > 0 {
_p0 = &buf[0]
}
- r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ r0, _, e1 := syscall.SyscallN(procrecvfrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
n = int32(r0)
if n == -1 {
err = errnoErr(e1)
@@ -4641,7 +4696,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (
if len(buf) > 0 {
_p0 = &buf[0]
}
- r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen))
+ r1, _, e1 := syscall.SyscallN(procsendto.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4649,7 +4704,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (
}
func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) {
- r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0)
+ r1, _, e1 := syscall.SyscallN(procsetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4657,7 +4712,7 @@ func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32
}
func shutdown(s Handle, how int32) (err error) {
- r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0)
+ r1, _, e1 := syscall.SyscallN(procshutdown.Addr(), uintptr(s), uintptr(how))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4665,7 +4720,7 @@ func shutdown(s Handle, how int32) (err error) {
}
func socket(af int32, typ int32, protocol int32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol))
+ r0, _, e1 := syscall.SyscallN(procsocket.Addr(), uintptr(af), uintptr(typ), uintptr(protocol))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -4674,7 +4729,7 @@ func socket(af int32, typ int32, protocol int32) (handle Handle, err error) {
}
func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0)
+ r1, _, e1 := syscall.SyscallN(procWTSEnumerateSessionsW.Addr(), uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4682,12 +4737,12 @@ func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessio
}
func WTSFreeMemory(ptr uintptr) {
- syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0)
+ syscall.SyscallN(procWTSFreeMemory.Addr(), uintptr(ptr))
return
}
func WTSQueryUserToken(session uint32, token *Token) (err error) {
- r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0)
+ r1, _, e1 := syscall.SyscallN(procWTSQueryUserToken.Addr(), uintptr(session), uintptr(unsafe.Pointer(token)))
if r1 == 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go
index df6bf948e1..0ddd81c02a 100644
--- a/vendor/golang.org/x/term/term_windows.go
+++ b/vendor/golang.org/x/term/term_windows.go
@@ -20,12 +20,14 @@ func isTerminal(fd int) bool {
return err == nil
}
+// This is intended to be used on a console input handle.
+// See https://learn.microsoft.com/en-us/windows/console/setconsolemode
func makeRaw(fd int) (*State, error) {
var st uint32
if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
return nil, err
}
- raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
+ raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT)
raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil {
return nil, err
diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go
index 13e9a64ad1..9255449b9b 100644
--- a/vendor/golang.org/x/term/terminal.go
+++ b/vendor/golang.org/x/term/terminal.go
@@ -146,6 +146,7 @@ const (
keyCtrlD = 4
keyCtrlU = 21
keyEnter = '\r'
+ keyLF = '\n'
keyEscape = 27
keyBackspace = 127
keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota
@@ -412,7 +413,7 @@ func (t *Terminal) eraseNPreviousChars(n int) {
}
}
-// countToLeftWord returns then number of characters from the cursor to the
+// countToLeftWord returns the number of characters from the cursor to the
// start of the previous word.
func (t *Terminal) countToLeftWord() int {
if t.pos == 0 {
@@ -437,7 +438,7 @@ func (t *Terminal) countToLeftWord() int {
return t.pos - pos
}
-// countToRightWord returns then number of characters from the cursor to the
+// countToRightWord returns the number of characters from the cursor to the
// start of the next word.
func (t *Terminal) countToRightWord() int {
pos := t.pos
@@ -477,7 +478,7 @@ func visualLength(runes []rune) int {
return length
}
-// histroryAt unlocks the terminal and relocks it while calling History.At.
+// historyAt unlocks the terminal and relocks it while calling History.At.
func (t *Terminal) historyAt(idx int) (string, bool) {
t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer.
defer t.lock.Lock() // panic in At (or Len) protection.
@@ -497,7 +498,7 @@ func (t *Terminal) historyAdd(entry string) {
// handleKey processes the given key and, optionally, returns a line of text
// that the user has entered.
func (t *Terminal) handleKey(key rune) (line string, ok bool) {
- if t.pasteActive && key != keyEnter {
+ if t.pasteActive && key != keyEnter && key != keyLF {
t.addKeyToLine(key)
return
}
@@ -567,7 +568,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
t.setLine(runes, len(runes))
}
}
- case keyEnter:
+ case keyEnter, keyLF:
t.moveCursorToPos(len(t.line))
t.queue([]rune("\r\n"))
line = string(t.line)
@@ -812,6 +813,10 @@ func (t *Terminal) readLine() (line string, err error) {
if !t.pasteActive {
lineIsPasted = false
}
+ // If we have CR, consume LF if present (CRLF sequence) to avoid returning an extra empty line.
+ if key == keyEnter && len(rest) > 0 && rest[0] == keyLF {
+ rest = rest[1:]
+ }
line, lineOk = t.handleKey(key)
}
if len(rest) > 0 {
diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go
index 9d2ae547b5..fb8273236d 100644
--- a/vendor/golang.org/x/text/unicode/bidi/core.go
+++ b/vendor/golang.org/x/text/unicode/bidi/core.go
@@ -427,13 +427,6 @@ type isolatingRunSequence struct {
func (i *isolatingRunSequence) Len() int { return len(i.indexes) }
-func maxLevel(a, b level) level {
- if a > b {
- return a
- }
- return b
-}
-
// Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types,
// either L or R, for each isolating run sequence.
func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
@@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
indexes: indexes,
types: types,
level: level,
- sos: typeForLevel(maxLevel(prevLevel, level)),
- eos: typeForLevel(maxLevel(succLevel, level)),
+ sos: typeForLevel(max(prevLevel, level)),
+ eos: typeForLevel(max(succLevel, level)),
}
}
diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE
index 6a66aea5ea..2a7cf70da6 100644
--- a/vendor/golang.org/x/time/LICENSE
+++ b/vendor/golang.org/x/time/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index f0e0cf3cb1..93a798ab63 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -52,6 +52,8 @@ func Every(interval time.Duration) Limit {
// or its associated context.Context is canceled.
//
// The methods AllowN, ReserveN, and WaitN consume n tokens.
+//
+// Limiter is safe for simultaneous use by multiple goroutines.
type Limiter struct {
mu sync.Mutex
limit Limit
@@ -97,8 +99,9 @@ func (lim *Limiter) Tokens() float64 {
// bursts of at most b tokens.
func NewLimiter(r Limit, b int) *Limiter {
return &Limiter{
- limit: r,
- burst: b,
+ limit: r,
+ burst: b,
+ tokens: float64(b),
}
}
@@ -342,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
tokens: n,
timeToAct: t,
}
- } else if lim.limit == 0 {
- var ok bool
- if lim.burst >= n {
- ok = true
- lim.burst -= n
- }
- return Reservation{
- ok: ok,
- lim: lim,
- tokens: lim.burst,
- timeToAct: t,
- }
}
t, tokens := lim.advance(t)
diff --git a/vendor/golang.org/x/tools/cover/profile.go b/vendor/golang.org/x/tools/cover/profile.go
new file mode 100644
index 0000000000..47a9a54116
--- /dev/null
+++ b/vendor/golang.org/x/tools/cover/profile.go
@@ -0,0 +1,266 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cover provides support for parsing coverage profiles
+// generated by "go test -coverprofile=cover.out".
+package cover // import "golang.org/x/tools/cover"
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Profile represents the profiling data for a specific file.
+type Profile struct {
+ FileName string
+ Mode string
+ Blocks []ProfileBlock
+}
+
+// ProfileBlock represents a single block of profiling data.
+type ProfileBlock struct {
+ StartLine, StartCol int
+ EndLine, EndCol int
+ NumStmt, Count int
+}
+
+type byFileName []*Profile
+
+func (p byFileName) Len() int { return len(p) }
+func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName }
+func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// ParseProfiles parses profile data in the specified file and returns a
+// Profile for each source file described therein.
+func ParseProfiles(fileName string) ([]*Profile, error) {
+ pf, err := os.Open(fileName)
+ if err != nil {
+ return nil, err
+ }
+ defer pf.Close()
+ return ParseProfilesFromReader(pf)
+}
+
+// ParseProfilesFromReader parses profile data from the Reader and
+// returns a Profile for each source file described therein.
+func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) {
+ // First line is "mode: foo", where foo is "set", "count", or "atomic".
+ // Rest of file is in the format
+ // encoding/base64/base64.go:34.44,37.40 3 1
+ // where the fields are: name.go:line.column,line.column numberOfStatements count
+ files := make(map[string]*Profile)
+ s := bufio.NewScanner(rd)
+ mode := ""
+ for s.Scan() {
+ line := s.Text()
+ if mode == "" {
+ const p = "mode: "
+ if !strings.HasPrefix(line, p) || line == p {
+ return nil, fmt.Errorf("bad mode line: %v", line)
+ }
+ mode = line[len(p):]
+ continue
+ }
+ fn, b, err := parseLine(line)
+ if err != nil {
+ return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err)
+ }
+ p := files[fn]
+ if p == nil {
+ p = &Profile{
+ FileName: fn,
+ Mode: mode,
+ }
+ files[fn] = p
+ }
+ p.Blocks = append(p.Blocks, b)
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ for _, p := range files {
+ sort.Sort(blocksByStart(p.Blocks))
+ // Merge samples from the same location.
+ j := 1
+ for i := 1; i < len(p.Blocks); i++ {
+ b := p.Blocks[i]
+ last := p.Blocks[j-1]
+ if b.StartLine == last.StartLine &&
+ b.StartCol == last.StartCol &&
+ b.EndLine == last.EndLine &&
+ b.EndCol == last.EndCol {
+ if b.NumStmt != last.NumStmt {
+ return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt)
+ }
+ if mode == "set" {
+ p.Blocks[j-1].Count |= b.Count
+ } else {
+ p.Blocks[j-1].Count += b.Count
+ }
+ continue
+ }
+ p.Blocks[j] = b
+ j++
+ }
+ p.Blocks = p.Blocks[:j]
+ }
+ // Generate a sorted slice.
+ profiles := make([]*Profile, 0, len(files))
+ for _, profile := range files {
+ profiles = append(profiles, profile)
+ }
+ sort.Sort(byFileName(profiles))
+ return profiles, nil
+}
+
+// parseLine parses a line from a coverage file.
+// It is equivalent to the regex
+// ^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$
+//
+// However, it is much faster: https://golang.org/cl/179377
+func parseLine(l string) (fileName string, block ProfileBlock, err error) {
+ end := len(l)
+
+ b := ProfileBlock{}
+ b.Count, end, err = seekBack(l, ' ', end, "Count")
+ if err != nil {
+ return "", b, err
+ }
+ b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt")
+ if err != nil {
+ return "", b, err
+ }
+ b.EndCol, end, err = seekBack(l, '.', end, "EndCol")
+ if err != nil {
+ return "", b, err
+ }
+ b.EndLine, end, err = seekBack(l, ',', end, "EndLine")
+ if err != nil {
+ return "", b, err
+ }
+ b.StartCol, end, err = seekBack(l, '.', end, "StartCol")
+ if err != nil {
+ return "", b, err
+ }
+ b.StartLine, end, err = seekBack(l, ':', end, "StartLine")
+ if err != nil {
+ return "", b, err
+ }
+ fn := l[0:end]
+ if fn == "" {
+ return "", b, errors.New("a FileName cannot be blank")
+ }
+ return fn, b, nil
+}
+
+// seekBack searches backwards from end to find sep in l, then returns the
+// value between sep and end as an integer.
+// If seekBack fails, the returned error will reference what.
+func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) {
+ // Since we're seeking backwards and we know only ASCII is legal for these values,
+ // we can ignore the possibility of non-ASCII characters.
+ for start := end - 1; start >= 0; start-- {
+ if l[start] == sep {
+ i, err := strconv.Atoi(l[start+1 : end])
+ if err != nil {
+ return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err)
+ }
+ if i < 0 {
+ return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i)
+ }
+ return i, start, nil
+ }
+ }
+ return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what)
+}
+
+type blocksByStart []ProfileBlock
+
+func (b blocksByStart) Len() int { return len(b) }
+func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b blocksByStart) Less(i, j int) bool {
+ bi, bj := b[i], b[j]
+ return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol
+}
+
+// Boundary represents the position in a source file of the beginning or end of a
+// block as reported by the coverage profile. In HTML mode, it will correspond to
+// the opening or closing of a tag and will be used to colorize the source
+type Boundary struct {
+ Offset int // Location as a byte offset in the source file.
+ Start bool // Is this the start of a block?
+ Count int // Event count from the cover profile.
+ Norm float64 // Count normalized to [0..1].
+ Index int // Order in input file.
+}
+
+// Boundaries returns a Profile as a set of Boundary objects within the provided src.
+func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) {
+ // Find maximum count.
+ max := 0
+ for _, b := range p.Blocks {
+ if b.Count > max {
+ max = b.Count
+ }
+ }
+ // Divisor for normalization.
+ divisor := math.Log(float64(max))
+
+ // boundary returns a Boundary, populating the Norm field with a normalized Count.
+ index := 0
+ boundary := func(offset int, start bool, count int) Boundary {
+ b := Boundary{Offset: offset, Start: start, Count: count, Index: index}
+ index++
+ if !start || count == 0 {
+ return b
+ }
+ if max <= 1 {
+ b.Norm = 0.8 // Profile is in"set" mode; we want a heat map. Use cov8 in the CSS.
+ } else if count > 0 {
+ b.Norm = math.Log(float64(count)) / divisor
+ }
+ return b
+ }
+
+ line, col := 1, 2 // TODO: Why is this 2?
+ for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); {
+ b := p.Blocks[bi]
+ if b.StartLine == line && b.StartCol == col {
+ boundaries = append(boundaries, boundary(si, true, b.Count))
+ }
+ if b.EndLine == line && b.EndCol == col || line > b.EndLine {
+ boundaries = append(boundaries, boundary(si, false, 0))
+ bi++
+ continue // Don't advance through src; maybe the next block starts here.
+ }
+ if src[si] == '\n' {
+ line++
+ col = 0
+ }
+ col++
+ si++
+ }
+ sort.Sort(boundariesByPos(boundaries))
+ return
+}
+
+type boundariesByPos []Boundary
+
+func (b boundariesByPos) Len() int { return len(b) }
+func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b boundariesByPos) Less(i, j int) bool {
+ if b[i].Offset == b[j].Offset {
+ // Boundaries at the same offset should be ordered according to
+ // their original position.
+ return b[i].Index < b[j].Index
+ }
+ return b[i].Offset < b[j].Offset
+}
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go
index 31c8d2f240..7e72d3c284 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go
@@ -40,7 +40,7 @@ type Cursor struct {
// Root returns a cursor for the virtual root node,
// whose children are the files provided to [New].
//
-// Its [Cursor.Node] and [Cursor.Stack] methods return nil.
+// Its [Cursor.Node] method return nil.
func (in *Inspector) Root() Cursor {
return Cursor{in, -1}
}
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
index bc44b2c8e7..a703cdfcf9 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
@@ -85,6 +85,7 @@ type event struct {
// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer).
// Type can be recovered from the sole bit in typ.
+// [Tried this, wasn't faster. --adonovan]
// Preorder visits all the nodes of the files supplied to New in
// depth-first order. It calls f(n) for each node n before it visits
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
index e936c67c98..9852331a3d 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
@@ -12,8 +12,6 @@ package inspector
import (
"go/ast"
"math"
-
- _ "unsafe"
)
const (
@@ -217,7 +215,6 @@ func typeOf(n ast.Node) uint64 {
return 0
}
-//go:linkname maskOf golang.org/x/tools/go/ast/inspector.maskOf
func maskOf(nodes []ast.Node) uint64 {
if len(nodes) == 0 {
return math.MaxUint64 // match all node types
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
new file mode 100644
index 0000000000..7b90bc9235
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -0,0 +1,236 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gcexportdata provides functions for reading and writing
+// export data, which is a serialized description of the API of a Go
+// package including the names, kinds, types, and locations of all
+// exported declarations.
+//
+// The standard Go compiler (cmd/compile) writes an export data file
+// for each package it compiles, which it later reads when compiling
+// packages that import the earlier one. The compiler must thus
+// contain logic to both write and read export data.
+// (See the "Export" section in the cmd/compile/README file.)
+//
+// The [Read] function in this package can read files produced by the
+// compiler, producing [go/types] data structures. As a matter of
+// policy, Read supports export data files produced by only the last
+// two Go releases plus tip; see https://go.dev/issue/68898. The
+// export data files produced by the compiler contain additional
+// details related to generics, inlining, and other optimizations that
+// cannot be decoded by the [Read] function.
+//
+// In files written by the compiler, the export data is not at the
+// start of the file. Before calling Read, use [NewReader] to locate
+// the desired portion of the file.
+//
+// The [Write] function in this package encodes the exported API of a
+// Go package ([types.Package]) as a file. Such files can be later
+// decoded by Read, but cannot be consumed by the compiler.
+//
+// # Future changes
+//
+// Although Read supports the formats written by both Write and the
+// compiler, the two are quite different, and there is an open
+// proposal (https://go.dev/issue/69491) to separate these APIs.
+//
+// Under that proposal, this package would ultimately provide only the
+// Read operation for compiler export data, which must be defined in
+// this module (golang.org/x/tools), not in the standard library, to
+// avoid version skew for developer tools that need to read compiler
+// export data both before and after a Go release, such as from Go
+// 1.23 to Go 1.24. Because this package lives in the tools module,
+// clients can update their version of the module some time before the
+// Go 1.24 release and rebuild and redeploy their tools, which will
+// then be able to consume both Go 1.23 and Go 1.24 export data files,
+// so they will work before and after the Go update. (See discussion
+// at https://go.dev/issue/15651.)
+//
+// The operations to import and export [go/types] data structures
+// would be defined in the go/types package as Import and Export.
+// [Write] would (eventually) delegate to Export,
+// and [Read], when it detects a file produced by Export,
+// would delegate to Import.
+//
+// # Deprecations
+//
+// The [NewImporter] and [Find] functions are deprecated and should
+// not be used in new code. The [WriteBundle] and [ReadBundle]
+// functions are experimental, and there is an open proposal to
+// deprecate them (https://go.dev/issue/69573).
+package gcexportdata
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "go/token"
+ "go/types"
+ "io"
+ "os/exec"
+
+ "golang.org/x/tools/internal/gcimporter"
+)
+
+// Find returns the name of an object (.o) or archive (.a) file
+// containing type information for the specified import path,
+// using the go command.
+// If no file was found, an empty filename is returned.
+//
+// A relative srcDir is interpreted relative to the current working directory.
+//
+// Find also returns the package's resolved (canonical) import path,
+// reflecting the effects of srcDir and vendoring on importPath.
+//
+// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages,
+// which is more efficient.
+func Find(importPath, srcDir string) (filename, path string) {
+ cmd := exec.Command("go", "list", "-json", "-export", "--", importPath)
+ cmd.Dir = srcDir
+ out, err := cmd.Output()
+ if err != nil {
+ return "", ""
+ }
+ var data struct {
+ ImportPath string
+ Export string
+ }
+ json.Unmarshal(out, &data)
+ return data.Export, data.ImportPath
+}
+
+// NewReader returns a reader for the export data section of an object
+// (.o) or archive (.a) file read from r. The new reader may provide
+// additional trailing data beyond the end of the export data.
+func NewReader(r io.Reader) (io.Reader, error) {
+ buf := bufio.NewReader(r)
+ size, err := gcimporter.FindExportData(buf)
+ if err != nil {
+ return nil, err
+ }
+
+ // We were given an archive and found the __.PKGDEF in it.
+ // This tells us the size of the export data, and we don't
+ // need to return the entire file.
+ return &io.LimitedReader{
+ R: buf,
+ N: size,
+ }, nil
+}
+
+// readAll works the same way as io.ReadAll, but avoids allocations and copies
+// by preallocating a byte slice of the necessary size if the size is known up
+// front. This is always possible when the input is an archive. In that case,
+// NewReader will return the known size using an io.LimitedReader.
+func readAll(r io.Reader) ([]byte, error) {
+ if lr, ok := r.(*io.LimitedReader); ok {
+ data := make([]byte, lr.N)
+ _, err := io.ReadFull(lr, data)
+ return data, err
+ }
+ return io.ReadAll(r)
+}
+
+// Read reads export data from in, decodes it, and returns type
+// information for the package.
+//
+// Read is capable of reading export data produced by [Write] at the
+// same source code version, or by the last two Go releases (plus tip)
+// of the standard Go compiler. Reading files from older compilers may
+// produce an error.
+//
+// The package path (effectively its linker symbol prefix) is
+// specified by path, since unlike the package name, this information
+// may not be recorded in the export data.
+//
+// File position information is added to fset.
+//
+// Read may inspect and add to the imports map to ensure that references
+// within the export data to other packages are consistent. The caller
+// must ensure that imports[path] does not exist, or exists but is
+// incomplete (see types.Package.Complete), and Read inserts the
+// resulting package into this map entry.
+//
+// On return, the state of the reader is undefined.
+func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
+ data, err := readAll(in)
+ if err != nil {
+ return nil, fmt.Errorf("reading export data for %q: %v", path, err)
+ }
+
+ if bytes.HasPrefix(data, []byte("!")) {
+ return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
+ }
+
+ // The indexed export format starts with an 'i'; the older
+ // binary export format starts with a 'c', 'd', or 'v'
+ // (from "version"). Select appropriate importer.
+ if len(data) > 0 {
+ switch data[0] {
+ case 'v', 'c', 'd':
+ // binary, produced by cmd/compile till go1.10
+ return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
+
+ case 'i':
+ // indexed, produced by cmd/compile till go1.19,
+ // and also by [Write].
+ //
+ // If proposal #69491 is accepted, go/types
+ // serialization will be implemented by
+ // types.Export, to which Write would eventually
+ // delegate (explicitly dropping any pretence at
+ // inter-version Write-Read compatibility).
+ // This [Read] function would delegate to types.Import
+ // when it detects that the file was produced by Export.
+ _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
+ return pkg, err
+
+ case 'u':
+ // unified, produced by cmd/compile since go1.20
+ _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
+ return pkg, err
+
+ default:
+ l := min(len(data), 10)
+ return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path)
+ }
+ }
+ return nil, fmt.Errorf("empty export data for %s", path)
+}
+
+// Write writes encoded type information for the specified package to out.
+// The FileSet provides file position information for named objects.
+func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
+ if _, err := io.WriteString(out, "i"); err != nil {
+ return err
+ }
+ return gcimporter.IExportData(out, fset, pkg)
+}
+
+// ReadBundle reads an export bundle from in, decodes it, and returns type
+// information for the packages.
+// File position information is added to fset.
+//
+// ReadBundle may inspect and add to the imports map to ensure that references
+// within the export bundle to other packages are consistent.
+//
+// On return, the state of the reader is undefined.
+//
+// Experimental: This API is experimental and may change in the future.
+func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) {
+ data, err := readAll(in)
+ if err != nil {
+ return nil, fmt.Errorf("reading export bundle: %v", err)
+ }
+ return gcimporter.IImportBundle(fset, imports, data)
+}
+
+// WriteBundle writes encoded type information for the specified packages to out.
+// The FileSet provides file position information for named objects.
+//
+// Experimental: This API is experimental and may change in the future.
+func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
+ return gcimporter.IExportBundle(out, fset, pkgs)
+}
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go
new file mode 100644
index 0000000000..37a7247e26
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go
@@ -0,0 +1,75 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcexportdata
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "os"
+)
+
+// NewImporter returns a new instance of the types.Importer interface
+// that reads type information from export data files written by gc.
+// The Importer also satisfies types.ImporterFrom.
+//
+// Export data files are located using "go build" workspace conventions
+// and the build.Default context.
+//
+// Use this importer instead of go/importer.For("gc", ...) to avoid the
+// version-skew problems described in the documentation of this package,
+// or to control the FileSet or access the imports map populated during
+// package loading.
+//
+// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages,
+// which is more efficient.
+func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom {
+ return importer{fset, imports}
+}
+
+type importer struct {
+ fset *token.FileSet
+ imports map[string]*types.Package
+}
+
+func (imp importer) Import(importPath string) (*types.Package, error) {
+ return imp.ImportFrom(importPath, "", 0)
+}
+
+func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) {
+ filename, path := Find(importPath, srcDir)
+ if filename == "" {
+ if importPath == "unsafe" {
+ // Even for unsafe, call Find first in case
+ // the package was vendored.
+ return types.Unsafe, nil
+ }
+ return nil, fmt.Errorf("can't find import: %s", importPath)
+ }
+
+ if pkg, ok := imp.imports[path]; ok && pkg.Complete() {
+ return pkg, nil // cache hit
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ f.Close()
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("reading export data: %s: %v", filename, err)
+ }
+ }()
+
+ r, err := NewReader(f)
+ if err != nil {
+ return nil, err
+ }
+
+ return Read(r, imp.fset, imp.imports, path)
+}
diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go
new file mode 100644
index 0000000000..366aab6b2c
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/doc.go
@@ -0,0 +1,253 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package packages loads Go packages for inspection and analysis.
+
+The [Load] function takes as input a list of patterns and returns a
+list of [Package] values describing individual packages matched by those
+patterns.
+A [Config] specifies configuration options, the most important of which is
+the [LoadMode], which controls the amount of detail in the loaded packages.
+
+Load passes most patterns directly to the underlying build tool.
+The default build tool is the go command.
+Its supported patterns are described at
+https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns.
+Other build systems may be supported by providing a "driver";
+see [The driver protocol].
+
+All patterns with the prefix "query=", where query is a
+non-empty string of letters from [a-z], are reserved and may be
+interpreted as query operators.
+
+Two query operators are currently supported: "file" and "pattern".
+
+The query "file=path/to/file.go" matches the package or packages enclosing
+the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
+might return the packages "fmt" and "fmt [fmt.test]".
+
+The query "pattern=string" causes "string" to be passed directly to
+the underlying build tool. In most cases this is unnecessary,
+but an application can use Load("pattern=" + x) as an escaping mechanism
+to ensure that x is not interpreted as a query operator if it contains '='.
+
+All other query operators are reserved for future use and currently
+cause Load to report an error.
+
+The Package struct provides basic information about the package, including
+
+ - ID, a unique identifier for the package in the returned set;
+ - GoFiles, the names of the package's Go source files;
+ - Imports, a map from source import strings to the Packages they name;
+ - Types, the type information for the package's exported symbols;
+ - Syntax, the parsed syntax trees for the package's source code; and
+ - TypesInfo, the result of a complete type-check of the package syntax trees.
+
+(See the documentation for type Package for the complete list of fields
+and more detailed descriptions.)
+
+For example,
+
+ Load(nil, "bytes", "unicode...")
+
+returns four Package structs describing the standard library packages
+bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
+can match multiple packages and that a package might be matched by
+multiple patterns: in general it is not possible to determine which
+packages correspond to which patterns.
+
+Note that the list returned by Load contains only the packages matched
+by the patterns. Their dependencies can be found by walking the import
+graph using the Imports fields.
+
+The Load function can be configured by passing a pointer to a Config as
+the first argument. A nil Config is equivalent to the zero Config, which
+causes Load to run in [LoadFiles] mode, collecting minimal information.
+See the documentation for type Config for details.
+
+As noted earlier, the Config.Mode controls the amount of detail
+reported about the loaded packages. See the documentation for type LoadMode
+for details.
+
+Most tools should pass their command-line arguments (after any flags)
+uninterpreted to Load, so that it can interpret them
+according to the conventions of the underlying build system.
+
+See the Example function for typical usage.
+See also [golang.org/x/tools/go/packages/internal/linecount]
+for an example application.
+
+# The driver protocol
+
+Load may be used to load Go packages even in Go projects that use
+alternative build systems, by installing an appropriate "driver"
+program for the build system and specifying its location in the
+GOPACKAGESDRIVER environment variable.
+For example,
+https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration
+explains how to use the driver for Bazel.
+
+The driver program is responsible for interpreting patterns in its
+preferred notation and reporting information about the packages that
+those patterns identify. Drivers must also support the special "file="
+and "pattern=" patterns described above.
+
+The patterns are provided as positional command-line arguments. A
+JSON-encoded [DriverRequest] message providing additional information
+is written to the driver's standard input. The driver must write a
+JSON-encoded [DriverResponse] message to its standard output. (This
+message differs from the JSON schema produced by 'go list'.)
+
+The value of the PWD environment variable seen by the driver process
+is the preferred name of its working directory. (The working directory
+may have other aliases due to symbolic links; see the comment on the
+Dir field of [exec.Cmd] for related information.)
+When the driver process emits in its response the name of a file
+that is a descendant of this directory, it must use an absolute path
+that has the value of PWD as a prefix, to ensure that the returned
+filenames satisfy the original query.
+*/
+package packages // import "golang.org/x/tools/go/packages"
+
+/*
+
+Motivation and design considerations
+
+The new package's design solves problems addressed by two existing
+packages: go/build, which locates and describes packages, and
+golang.org/x/tools/go/loader, which loads, parses and type-checks them.
+The go/build.Package structure encodes too much of the 'go build' way
+of organizing projects, leaving us in need of a data type that describes a
+package of Go source code independent of the underlying build system.
+We wanted something that works equally well with go build and vgo, and
+also other build systems such as Bazel and Blaze, making it possible to
+construct analysis tools that work in all these environments.
+Tools such as errcheck and staticcheck were essentially unavailable to
+the Go community at Google, and some of Google's internal tools for Go
+are unavailable externally.
+This new package provides a uniform way to obtain package metadata by
+querying each of these build systems, optionally supporting their
+preferred command-line notations for packages, so that tools integrate
+neatly with users' build environments. The Metadata query function
+executes an external query tool appropriate to the current workspace.
+
+Loading packages always returns the complete import graph "all the way down",
+even if all you want is information about a single package, because the query
+mechanisms of all the build systems we currently support ({go,vgo} list, and
+blaze/bazel aspect-based query) cannot provide detailed information
+about one package without visiting all its dependencies too, so there is
+no additional asymptotic cost to providing transitive information.
+(This property might not be true of a hypothetical 5th build system.)
+
+In calls to TypeCheck, all initial packages, and any package that
+transitively depends on one of them, must be loaded from source.
+Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
+source; D may be loaded from export data, and E may not be loaded at all
+(though it's possible that D's export data mentions it, so a
+types.Package may be created for it and exposed.)
+
+The old loader had a feature to suppress type-checking of function
+bodies on a per-package basis, primarily intended to reduce the work of
+obtaining type information for imported packages. Now that imports are
+satisfied by export data, the optimization no longer seems necessary.
+
+Despite some early attempts, the old loader did not exploit export data,
+instead always using the equivalent of WholeProgram mode. This was due
+to the complexity of mixing source and export data packages (now
+resolved by the upward traversal mentioned above), and because export data
+files were nearly always missing or stale. Now that 'go build' supports
+caching, all the underlying build systems can guarantee to produce
+export data in a reasonable (amortized) time.
+
+Test "main" packages synthesized by the build system are now reported as
+first-class packages, avoiding the need for clients (such as go/ssa) to
+reinvent this generation logic.
+
+One way in which go/packages is simpler than the old loader is in its
+treatment of in-package tests. In-package tests are packages that
+consist of all the files of the library under test, plus the test files.
+The old loader constructed in-package tests by a two-phase process of
+mutation called "augmentation": first it would construct and type check
+all the ordinary library packages and type-check the packages that
+depend on them; then it would add more (test) files to the package and
+type-check again. This two-phase approach had four major problems:
+1) in processing the tests, the loader modified the library package,
+ leaving no way for a client application to see both the test
+ package and the library package; one would mutate into the other.
+2) because test files can declare additional methods on types defined in
+ the library portion of the package, the dispatch of method calls in
+ the library portion was affected by the presence of the test files.
+ This should have been a clue that the packages were logically
+ different.
+3) this model of "augmentation" assumed at most one in-package test
+ per library package, which is true of projects using 'go build',
+ but not other build systems.
+4) because of the two-phase nature of test processing, all packages that
+ import the library package had to be processed before augmentation,
+ forcing a "one-shot" API and preventing the client from calling Load
+   several times in sequence as is now possible in WholeProgram mode.
+ (TypeCheck mode has a similar one-shot restriction for a different reason.)
+
+Early drafts of this package supported "multi-shot" operation.
+Although it allowed clients to make a sequence of calls (or concurrent
+calls) to Load, building up the graph of Packages incrementally,
+it was of marginal value: it complicated the API
+(since it allowed some options to vary across calls but not others),
+it complicated the implementation,
+it cannot be made to work in Types mode, as explained above,
+and it was less efficient than making one combined call (when this is possible).
+Among the clients we have inspected, none made multiple calls to load
+but could not be easily and satisfactorily modified to make only a single call.
+However, application changes may be required.
+For example, the ssadump command loads the user-specified packages
+and in addition the runtime package. It is tempting to simply append
+"runtime" to the user-provided list, but that does not work if the user
+specified an ad-hoc package such as [a.go b.go].
+Instead, ssadump no longer requests the runtime package,
+but seeks it among the dependencies of the user-specified packages,
+and emits an error if it is not found.
+
+Questions & Tasks
+
+- Add GOARCH/GOOS?
+ They are not portable concepts, but could be made portable.
+ Our goal has been to allow users to express themselves using the conventions
+ of the underlying build system: if the build system honors GOARCH
+ during a build and during a metadata query, then so should
+ applications built atop that query mechanism.
+ Conversely, if the target architecture of the build is determined by
+ command-line flags, the application can pass the relevant
+ flags through to the build system using a command such as:
+ myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
+ However, this approach is low-level, unwieldy, and non-portable.
+ GOOS and GOARCH seem important enough to warrant a dedicated option.
+
+- How should we handle partial failures such as a mixture of good and
+ malformed patterns, existing and non-existent packages, successful and
+ failed builds, import failures, import cycles, and so on, in a call to
+ Load?
+
+- Support bazel, blaze, and go1.10 list, not just go1.11 list.
+
+- Handle (and test) various partial success cases, e.g.
+ a mixture of good packages and:
+ invalid patterns
+ nonexistent packages
+ empty packages
+ packages with malformed package or import declarations
+ unreadable files
+ import cycles
+ other parse errors
+ type errors
+ Make sure we record errors at the correct place in the graph.
+
+- Missing packages among initial arguments are not reported.
+ Return bogus packages for them, like golist does.
+
+- "undeclared name" errors (for example) are reported out of source file
+ order. I suspect this is due to the breadth-first resolution now used
+ by go/types. Is that a bug? Discuss with gri.
+
+*/
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
new file mode 100644
index 0000000000..f37bc65100
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -0,0 +1,153 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+// This file defines the protocol that enables an external "driver"
+// tool to supply package metadata in place of 'go list'.
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/exec"
+ "slices"
+ "strings"
+)
+
+// DriverRequest defines the schema of a request for package metadata
+// from an external driver program. The JSON-encoded DriverRequest
+// message is provided to the driver program's standard input. The
+// query patterns are provided as command-line arguments.
+//
+// See the package documentation for an overview.
+type DriverRequest struct {
+ Mode LoadMode `json:"mode"`
+
+ // Env specifies the environment the underlying build system should be run in.
+ Env []string `json:"env"`
+
+ // BuildFlags are flags that should be passed to the underlying build system.
+ BuildFlags []string `json:"build_flags"`
+
+ // Tests specifies whether the patterns should also return test packages.
+ Tests bool `json:"tests"`
+
+ // Overlay maps file paths (relative to the driver's working directory)
+ // to the contents of overlay files (see Config.Overlay).
+ Overlay map[string][]byte `json:"overlay"`
+}
+
+// DriverResponse defines the schema of a response from an external
+// driver program, providing the results of a query for package
+// metadata. The driver program must write a JSON-encoded
+// DriverResponse message to its standard output.
+//
+// See the package documentation for an overview.
+type DriverResponse struct {
+ // NotHandled is returned if the request can't be handled by the current
+ // driver. If an external driver returns a response with NotHandled, the
+ // rest of the DriverResponse is ignored, and go/packages will fallback
+ // to the next driver. If go/packages is extended in the future to support
+ // lists of multiple drivers, go/packages will fall back to the next driver.
+ NotHandled bool
+
+	// Compiler and Arch are the arguments to pass to types.SizesFor
+ // to get a types.Sizes to use when type checking.
+ Compiler string
+ Arch string
+
+ // Roots is the set of package IDs that make up the root packages.
+ // We have to encode this separately because when we encode a single package
+ // we cannot know if it is one of the roots as that requires knowledge of the
+ // graph it is part of.
+ Roots []string `json:",omitempty"`
+
+ // Packages is the full set of packages in the graph.
+ // The packages are not connected into a graph.
+	// The Imports, if populated, will be stubs that only have their ID set.
+ // Imports will be connected and then type and syntax information added in a
+ // later pass (see refine).
+ Packages []*Package
+
+ // GoVersion is the minor version number used by the driver
+ // (e.g. the go command on the PATH) when selecting .go files.
+ // Zero means unknown.
+ GoVersion int
+}
+
+// driver is the type for functions that query the build system for the
+// packages named by the patterns.
+type driver func(cfg *Config, patterns []string) (*DriverResponse, error)
+
+// findExternalDriver returns the file path of a tool that supplies
+// the build system package structure, or "" if not found.
+// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
+// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
+func findExternalDriver(cfg *Config) driver {
+ const toolPrefix = "GOPACKAGESDRIVER="
+ tool := ""
+ for _, env := range cfg.Env {
+ if val, ok := strings.CutPrefix(env, toolPrefix); ok {
+ tool = val
+ }
+ }
+ if tool != "" && tool == "off" {
+ return nil
+ }
+ if tool == "" {
+ var err error
+ tool, err = exec.LookPath("gopackagesdriver")
+ if err != nil {
+ return nil
+ }
+ }
+ return func(cfg *Config, patterns []string) (*DriverResponse, error) {
+ req, err := json.Marshal(DriverRequest{
+ Mode: cfg.Mode,
+ Env: cfg.Env,
+ BuildFlags: cfg.BuildFlags,
+ Tests: cfg.Tests,
+ Overlay: cfg.Overlay,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
+ }
+
+ buf := new(bytes.Buffer)
+ stderr := new(bytes.Buffer)
+ cmd := exec.CommandContext(cfg.Context, tool, patterns...)
+ cmd.Dir = cfg.Dir
+ // The cwd gets resolved to the real path. On Darwin, where
+ // /tmp is a symlink, this breaks anything that expects the
+ // working directory to keep the original path, including the
+ // go command when dealing with modules.
+ //
+ // os.Getwd stdlib has a special feature where if the
+ // cwd and the PWD are the same node then it trusts
+ // the PWD, so by setting it in the env for the child
+ // process we fix up all the paths returned by the go
+ // command.
+ //
+ // (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go)
+ cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir)
+ cmd.Stdin = bytes.NewReader(req)
+ cmd.Stdout = buf
+ cmd.Stderr = stderr
+
+ if err := cmd.Run(); err != nil {
+ return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
+ }
+ if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
+ fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr)
+ }
+
+ var response DriverResponse
+ if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
+ return nil, err
+ }
+ return &response, nil
+ }
+}
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
new file mode 100644
index 0000000000..680a70ca8f
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -0,0 +1,1086 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/packagesinternal"
+)
+
+// debug controls verbose logging, enabled by setting the
+// GOPACKAGESDEBUG environment variable to a true-ish value.
+var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG"))
+
+// A goTooOldError reports that the go command
+// found by exec.LookPath is too old to use the new go list behavior.
+// It is produced by invokeGo when stderr contains
+// "flag provided but not defined".
+type goTooOldError struct {
+	error
+}
+
+// responseDeduper wraps a DriverResponse, deduplicating its contents.
+type responseDeduper struct {
+	seenRoots    map[string]bool     // root IDs already appended to dr.Roots
+	seenPackages map[string]*Package // packages already appended to dr.Packages, keyed by ID
+	dr           *DriverResponse     // the accumulated, deduplicated response
+}
+
+// newDeduper returns an empty responseDeduper, ready to accumulate
+// results via addAll, addPackage, and addRoot.
+func newDeduper() *responseDeduper {
+	return &responseDeduper{
+		dr:           &DriverResponse{},
+		seenRoots:    map[string]bool{},
+		seenPackages: map[string]*Package{},
+	}
+}
+
+// addAll fills in r with a DriverResponse.
+// Packages and roots are deduplicated against earlier additions;
+// GoVersion is overwritten each call, so the last added response wins.
+func (r *responseDeduper) addAll(dr *DriverResponse) {
+	for _, pkg := range dr.Packages {
+		r.addPackage(pkg)
+	}
+	for _, root := range dr.Roots {
+		r.addRoot(root)
+	}
+	r.dr.GoVersion = dr.GoVersion
+}
+
+// addPackage records p in the response unless a package with the same
+// ID has already been added; the first occurrence wins.
+func (r *responseDeduper) addPackage(p *Package) {
+	if r.seenPackages[p.ID] != nil {
+		return
+	}
+	r.seenPackages[p.ID] = p
+	r.dr.Packages = append(r.dr.Packages, p)
+}
+
+// addRoot records id as a root package unless it was already recorded.
+func (r *responseDeduper) addRoot(id string) {
+	if r.seenRoots[id] {
+		return
+	}
+	r.seenRoots[id] = true
+	r.dr.Roots = append(r.dr.Roots, id)
+}
+
+// golistState holds the per-load state for the go list driver: the
+// caller's configuration plus lazily computed, memoized results of
+// auxiliary go command invocations (env, root dirs, version).
+type golistState struct {
+	cfg *Config
+	ctx context.Context
+
+	runner *gocommand.Runner
+
+	// overlay is the JSON file that encodes the Config.Overlay
+	// mapping, used by 'go list -overlay=...'.
+	overlay string
+
+	// envOnce guards the single 'go env' invocation; see getEnv.
+	envOnce    sync.Once
+	goEnvError error
+	goEnv      map[string]string
+
+	// rootsOnce guards the computation of rootDirs.
+	rootsOnce     sync.Once
+	rootDirsError error
+	rootDirs      map[string]string
+
+	// goVersionOnce guards the single go version query; see getGoVersion.
+	goVersionOnce  sync.Once
+	goVersionError error
+	goVersion      int // The X in Go 1.X.
+
+	// vendorDirs caches the (non)existence of vendor directories.
+	vendorDirs map[string]bool
+}
+
+// getEnv returns Go environment variables. Only specific variables are
+// populated -- computing all of them is slow.
+// The result is computed at most once (guarded by envOnce); subsequent
+// calls return the memoized map or the memoized error.
+func (state *golistState) getEnv() (map[string]string, error) {
+	state.envOnce.Do(func() {
+		var b *bytes.Buffer
+		// Only GOMOD and GOPATH are needed by callers; see mustGetEnv uses.
+		b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH")
+		if state.goEnvError != nil {
+			return
+		}
+
+		state.goEnv = make(map[string]string)
+		decoder := json.NewDecoder(b)
+		if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil {
+			return
+		}
+	})
+	return state.goEnv, state.goEnvError
+}
+
+// mustGetEnv is a convenience function that can be used if getEnv has already succeeded.
+// It panics if the underlying 'go env' invocation failed, so callers must
+// have verified getEnv's error beforehand.
+func (state *golistState) mustGetEnv() map[string]string {
+	env, err := state.getEnv()
+	if err != nil {
+		panic(fmt.Sprintf("mustGetEnv: %v", err))
+	}
+	return env
+}
+
+// goListDriver uses the go list command to interpret the patterns and produce
+// the build system package structure.
+// See driver for more details.
+//
+// overlay is the JSON file that encodes the cfg.Overlay
+// mapping, used by 'go list -overlay=...'
+func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) {
+	// Make sure that any asynchronous go commands are killed when we return.
+	parentCtx := cfg.Context
+	if parentCtx == nil {
+		parentCtx = context.Background()
+	}
+	ctx, cancel := context.WithCancel(parentCtx)
+	defer cancel()
+
+	response := newDeduper()
+
+	state := &golistState{
+		cfg:        cfg,
+		ctx:        ctx,
+		vendorDirs: map[string]bool{},
+		overlay:    overlay,
+		runner:     runner,
+	}
+
+	// Fill in response.Sizes asynchronously if necessary.
+	if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
+		errCh := make(chan error)
+		go func() {
+			compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner)
+			response.dr.Compiler = compiler
+			response.dr.Arch = arch
+			errCh <- err
+		}()
+		// The deferred receive both waits for the goroutine to finish and
+		// promotes any sizes error into this function's named result err,
+		// even on the "return response.dr, nil" path below.
+		defer func() {
+			if sizesErr := <-errCh; sizesErr != nil {
+				err = sizesErr
+			}
+		}()
+	}
+
+	// Determine files requested in contains patterns
+	var containFiles []string
+	restPatterns := make([]string, 0, len(patterns))
+	// Extract file= and other [querytype]= patterns. Report an error if querytype
+	// doesn't exist.
+extractQueries:
+	for _, pattern := range patterns {
+		eqidx := strings.Index(pattern, "=")
+		if eqidx < 0 {
+			restPatterns = append(restPatterns, pattern)
+		} else {
+			query, value := pattern[:eqidx], pattern[eqidx+len("="):]
+			switch query {
+			case "file":
+				containFiles = append(containFiles, value)
+			case "pattern":
+				restPatterns = append(restPatterns, value)
+			case "": // not a reserved query
+				restPatterns = append(restPatterns, pattern)
+			default:
+				// Only all-lowercase prefixes are reserved query types; anything
+				// else (e.g. a build tag expression) passes through unchanged.
+				for _, rune := range query {
+					if rune < 'a' || rune > 'z' { // not a reserved query
+						restPatterns = append(restPatterns, pattern)
+						continue extractQueries
+					}
+				}
+				// Reject all other patterns containing "="
+				return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern)
+			}
+		}
+	}
+
+	// See if we have any patterns to pass through to go list. Zero initial
+	// patterns also requires a go list call, since it's the equivalent of
+	// ".".
+	if len(restPatterns) > 0 || len(patterns) == 0 {
+		dr, err := state.createDriverResponse(restPatterns...)
+		if err != nil {
+			return nil, err
+		}
+		response.addAll(dr)
+	}
+
+	if len(containFiles) != 0 {
+		if err := state.runContainsQueries(response, containFiles); err != nil {
+			return nil, err
+		}
+	}
+
+	// (We may yet return an error due to defer.)
+	return response.dr, nil
+}
+
+// abs returns an absolute representation of path, based on cfg.Dir.
+// An already-absolute path is returned unchanged.
+func (cfg *Config) abs(path string) (string, error) {
+	// In case cfg.Dir is relative, pass it to filepath.Abs.
+	if filepath.IsAbs(path) {
+		return path, nil
+	}
+	return filepath.Abs(filepath.Join(cfg.Dir, path))
+}
+
+// runContainsQueries resolves file= queries: for each query file it runs a
+// driver query on the containing directory, adds the resulting packages to
+// response, and marks as roots only those packages that actually contain the
+// named file. Falls back to an ad-hoc package when the directory query fails
+// or returns nothing usable.
+func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error {
+	for _, query := range queries {
+		// TODO(matloob): Do only one query per directory.
+		fdir := filepath.Dir(query)
+		// Pass absolute path of directory to go list so that it knows to treat it as a directory,
+		// not a package path.
+		pattern, err := state.cfg.abs(fdir)
+		if err != nil {
+			return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
+		}
+		dirResponse, err := state.createDriverResponse(pattern)
+
+		// If there was an error loading the package, or no packages are returned,
+		// or the package is returned with errors, try to load the file as an
+		// ad-hoc package.
+		// Usually the error will appear in a returned package, but may not if we're
+		// in module mode and the ad-hoc is located outside a module.
+		if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 &&
+			len(dirResponse.Packages[0].Errors) == 1 {
+			var queryErr error
+			if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil {
+				return err // return the original error
+			}
+		}
+		isRoot := make(map[string]bool, len(dirResponse.Roots))
+		for _, root := range dirResponse.Roots {
+			isRoot[root] = true
+		}
+		for _, pkg := range dirResponse.Packages {
+			// Add any new packages to the main set
+			// We don't bother to filter packages that will be dropped by the changes of roots,
+			// that will happen anyway during graph construction outside this function.
+			// Over-reporting packages is not a problem.
+			response.addPackage(pkg)
+			// if the package was not a root one, it cannot have the file
+			if !isRoot[pkg.ID] {
+				continue
+			}
+			// pkg.GoFiles are absolute paths; compare base names so a
+			// relative query path still matches.
+			for _, pkgFile := range pkg.GoFiles {
+				if filepath.Base(query) == filepath.Base(pkgFile) {
+					response.addRoot(pkg.ID)
+					break
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// adhocPackage attempts to load or construct an ad-hoc package for a given
+// query, if the original call to the driver produced inadequate results.
+// pattern is the absolute directory of the query file; query is the
+// (possibly relative) file path as given by the caller.
+func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) {
+	response, err := state.createDriverResponse(query)
+	if err != nil {
+		return nil, err
+	}
+	// If we get nothing back from `go list`,
+	// try to make this file into its own ad-hoc package.
+	// TODO(rstambler): Should this check against the original response?
+	if len(response.Packages) == 0 {
+		response.Packages = append(response.Packages, &Package{
+			ID:              "command-line-arguments",
+			PkgPath:         query,
+			GoFiles:         []string{query},
+			CompiledGoFiles: []string{query},
+			Imports:         make(map[string]*Package),
+		})
+		response.Roots = append(response.Roots, "command-line-arguments")
+	}
+	// Handle special cases.
+	if len(response.Packages) == 1 {
+		// golang/go#33482: If this is a file= query for ad-hoc packages where
+		// the file only exists on an overlay, and exists outside of a module,
+		// add the file to the package and remove the errors.
+		if response.Packages[0].ID == "command-line-arguments" ||
+			filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) {
+			if len(response.Packages[0].GoFiles) == 0 {
+				filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath
+				// TODO(matloob): check if the file is outside of a root dir?
+				for path := range state.cfg.Overlay {
+					if path == filename {
+						response.Packages[0].Errors = nil
+						response.Packages[0].GoFiles = []string{path}
+						response.Packages[0].CompiledGoFiles = []string{path}
+					}
+				}
+			}
+		}
+	}
+	return response, nil
+}
+
+// jsonPackage is the JSON shape of one 'go list -json' record.
+// Fields must match go list;
+// see $GOROOT/src/cmd/go/internal/load/pkg.go.
+type jsonPackage struct {
+	ImportPath        string
+	Dir               string
+	Name              string
+	Target            string
+	Export            string
+	GoFiles           []string
+	CompiledGoFiles   []string
+	IgnoredGoFiles    []string
+	IgnoredOtherFiles []string
+	EmbedPatterns     []string
+	EmbedFiles        []string
+	CFiles            []string
+	CgoFiles          []string
+	CXXFiles          []string
+	MFiles            []string
+	HFiles            []string
+	FFiles            []string
+	SFiles            []string
+	SwigFiles         []string
+	SwigCXXFiles      []string
+	SysoFiles         []string
+	Imports           []string
+	ImportMap         map[string]string
+	Deps              []string
+	Module            *Module
+	TestGoFiles       []string
+	TestImports       []string
+	XTestGoFiles      []string
+	XTestImports      []string
+	ForTest           string // q in a "p [q.test]" package, else ""
+	DepOnly           bool
+
+	// Error and DepsErrors are populated by 'go list -e' for broken packages.
+	Error      *packagesinternal.PackageError
+	DepsErrors []*packagesinternal.PackageError
+}
+
+// otherFiles returns p's non-Go source file lists, one slice per category,
+// for flattening into Package.OtherFiles.
+func otherFiles(p *jsonPackage) [][]string {
+	return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
+}
+
+// createDriverResponse uses the "go list" command to expand the pattern
+// words and return a response for the specified packages.
+func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) {
+	// go list uses the following identifiers in ImportPath and Imports:
+	//
+	// 	"p"			-- importable package or main (command)
+	// 	"q.test"		-- q's test executable
+	// 	"p [q.test]"		-- variant of p as built for q's test executable
+	// 	"q_test [q.test]"	-- q's external test package
+	//
+	// The packages p that are built differently for a test q.test
+	// are q itself, plus any helpers used by the external test q_test,
+	// typically including "testing" and all its dependencies.
+
+	// Run "go list" for complete
+	// information on the specified packages.
+	goVersion, err := state.getGoVersion()
+	if err != nil {
+		return nil, err
+	}
+	buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...)
+	if err != nil {
+		return nil, err
+	}
+
+	seen := make(map[string]*jsonPackage)       // jsonPackage per ImportPath, for duplicate detection
+	pkgs := make(map[string]*Package)           // finished packages, keyed by ID
+	additionalErrors := make(map[string][]Error) // errors to attach to the importing package (golang.org/issue/36188)
+	// Decode the JSON and convert it to Package form.
+	response := &DriverResponse{
+		GoVersion: goVersion,
+	}
+	for dec := json.NewDecoder(buf); dec.More(); {
+		p := new(jsonPackage)
+		if err := dec.Decode(p); err != nil {
+			return nil, fmt.Errorf("JSON decoding failed: %v", err)
+		}
+
+		if p.ImportPath == "" {
+			// The documentation for go list says that “[e]rroneous packages will have
+			// a non-empty ImportPath”. If for some reason it comes back empty, we
+			// prefer to error out rather than silently discarding data or handing
+			// back a package without any way to refer to it.
+			if p.Error != nil {
+				return nil, Error{
+					Pos: p.Error.Pos,
+					Msg: p.Error.Err,
+				}
+			}
+			return nil, fmt.Errorf("package missing import path: %+v", p)
+		}
+
+		// Work around https://golang.org/issue/33157:
+		// go list -e, when given an absolute path, will find the package contained at
+		// that directory. But when no package exists there, it will return a fake package
+		// with an error and the ImportPath set to the absolute path provided to go list.
+		// Try to convert that absolute path to what its package path would be if it's
+		// contained in a known module or GOPATH entry. This will allow the package to be
+		// properly "reclaimed" when overlays are processed.
+		if filepath.IsAbs(p.ImportPath) && p.Error != nil {
+			pkgPath, ok, err := state.getPkgPath(p.ImportPath)
+			if err != nil {
+				return nil, err
+			}
+			if ok {
+				p.ImportPath = pkgPath
+			}
+		}
+
+		if old, found := seen[p.ImportPath]; found {
+			// If one version of the package has an error, and the other doesn't, assume
+			// that this is a case where go list is reporting a fake dependency variant
+			// of the imported package: When a package tries to invalidly import another
+			// package, go list emits a variant of the imported package (with the same
+			// import path, but with an error on it, and the package will have a
+			// DepError set on it). An example of when this can happen is for imports of
+			// main packages: main packages can not be imported, but they may be
+			// separately matched and listed by another pattern.
+			// See golang.org/issue/36188 for more details.
+
+			// The plan is that eventually, hopefully in Go 1.15, the error will be
+			// reported on the importing package rather than the duplicate "fake"
+			// version of the imported package. Once all supported versions of Go
+			// have the new behavior this logic can be deleted.
+			// TODO(matloob): delete the workaround logic once all supported versions of
+			// Go return the errors on the proper package.
+
+			// There should be exactly one version of a package that doesn't have an
+			// error.
+			if old.Error == nil && p.Error == nil {
+				if !reflect.DeepEqual(p, old) {
+					return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
+				}
+				continue
+			}
+
+			// Determine if this package's error needs to be bubbled up.
+			// This is a hack, and we expect for go list to eventually set the error
+			// on the package.
+			if old.Error != nil {
+				var errkind string
+				if strings.Contains(old.Error.Err, "not an importable package") {
+					errkind = "not an importable package"
+				} else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") {
+					errkind = "use of internal package not allowed"
+				}
+				if errkind != "" {
+					if len(old.Error.ImportStack) < 1 {
+						return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind)
+					}
+					importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1]
+					if importingPkg == old.ImportPath {
+						// Using an older version of Go which put this package itself on top of import
+						// stack, instead of the importer. Look for importer in second from top
+						// position.
+						if len(old.Error.ImportStack) < 2 {
+							return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind)
+						}
+						importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2]
+					}
+					additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{
+						Pos:  old.Error.Pos,
+						Msg:  old.Error.Err,
+						Kind: ListError,
+					})
+				}
+			}
+
+			// Make sure that if there's a version of the package without an error,
+			// that's the one reported to the user.
+			if old.Error == nil {
+				continue
+			}
+
+			// This package will replace the old one at the end of the loop.
+		}
+		seen[p.ImportPath] = p
+
+		pkg := &Package{
+			Name:            p.Name,
+			ID:              p.ImportPath,
+			Dir:             p.Dir,
+			Target:          p.Target,
+			GoFiles:         absJoin(p.Dir, p.GoFiles, p.CgoFiles),
+			CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
+			OtherFiles:      absJoin(p.Dir, otherFiles(p)...),
+			EmbedFiles:      absJoin(p.Dir, p.EmbedFiles),
+			EmbedPatterns:   absJoin(p.Dir, p.EmbedPatterns),
+			IgnoredFiles:    absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles),
+			ForTest:         p.ForTest,
+			depsErrors:      p.DepsErrors,
+			Module:          p.Module,
+		}
+
+		if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 {
+			if len(p.CompiledGoFiles) > len(p.GoFiles) {
+				// We need the cgo definitions, which are in the first
+				// CompiledGoFile after the non-cgo ones. This is a hack but there
+				// isn't currently a better way to find it. We also need the pure
+				// Go files and unprocessed cgo files, all of which are already
+				// in pkg.GoFiles.
+				cgoTypes := p.CompiledGoFiles[len(p.GoFiles)]
+				pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...)
+			} else {
+				// golang/go#38990: go list silently fails to do cgo processing
+				pkg.CompiledGoFiles = nil
+				pkg.Errors = append(pkg.Errors, Error{
+					Msg:  "go list failed to return CompiledGoFiles. This may indicate failure to perform cgo processing; try building at the command line. See https://golang.org/issue/38990.",
+					Kind: ListError,
+				})
+			}
+		}
+
+		// Work around https://golang.org/issue/28749:
+		// cmd/go puts assembly, C, and C++ files in CompiledGoFiles.
+		// Remove files from CompiledGoFiles that are non-go files
+		// (or are not files that look like they are from the cache).
+		if len(pkg.CompiledGoFiles) > 0 {
+			out := pkg.CompiledGoFiles[:0]
+			for _, f := range pkg.CompiledGoFiles {
+				if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file
+					continue
+				}
+				out = append(out, f)
+			}
+			pkg.CompiledGoFiles = out
+		}
+
+		// Extract the PkgPath from the package's ID.
+		if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
+			pkg.PkgPath = pkg.ID[:i]
+		} else {
+			pkg.PkgPath = pkg.ID
+		}
+
+		if pkg.PkgPath == "unsafe" {
+			pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929)
+		} else if len(pkg.CompiledGoFiles) == 0 {
+			// Work around for pre-go.1.11 versions of go list.
+			// TODO(matloob): they should be handled by the fallback.
+			// Can we delete this?
+			pkg.CompiledGoFiles = pkg.GoFiles
+		}
+
+		// Assume go list emits only absolute paths for Dir.
+		if p.Dir != "" && !filepath.IsAbs(p.Dir) {
+			log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
+		}
+
+		if p.Export != "" && !filepath.IsAbs(p.Export) {
+			pkg.ExportFile = filepath.Join(p.Dir, p.Export)
+		} else {
+			pkg.ExportFile = p.Export
+		}
+
+		// imports
+		//
+		// Imports contains the IDs of all imported packages.
+		// ImportsMap records (path, ID) only where they differ.
+		ids := make(map[string]bool)
+		for _, id := range p.Imports {
+			ids[id] = true
+		}
+		pkg.Imports = make(map[string]*Package)
+		for path, id := range p.ImportMap {
+			pkg.Imports[path] = &Package{ID: id} // non-identity import
+			delete(ids, id)
+		}
+		for id := range ids {
+			if id == "C" {
+				continue
+			}
+
+			pkg.Imports[id] = &Package{ID: id} // identity import
+		}
+		if !p.DepOnly {
+			response.Roots = append(response.Roots, pkg.ID)
+		}
+
+		// Temporary work-around for golang/go#39986. Parse filenames out of
+		// error messages. This happens if there are unrecoverable syntax
+		// errors in the source, so we can't match on a specific error message.
+		//
+		// TODO(rfindley): remove this heuristic, in favor of considering
+		// InvalidGoFiles from the list driver.
+		if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) {
+			addFilenameFromPos := func(pos string) bool {
+				split := strings.Split(pos, ":")
+				if len(split) < 1 {
+					return false
+				}
+				filename := strings.TrimSpace(split[0])
+				if filename == "" {
+					return false
+				}
+				if !filepath.IsAbs(filename) {
+					filename = filepath.Join(state.cfg.Dir, filename)
+				}
+				info, _ := os.Stat(filename)
+				if info == nil {
+					return false
+				}
+				pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename)
+				pkg.GoFiles = append(pkg.GoFiles, filename)
+				return true
+			}
+			found := addFilenameFromPos(err.Pos)
+			// In some cases, go list only reports the error position in the
+			// error text, not the error position. One such case is when the
+			// file's package name is a keyword (see golang.org/issue/39763).
+			if !found {
+				addFilenameFromPos(err.Err)
+			}
+		}
+
+		if p.Error != nil {
+			msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363.
+			// Address golang.org/issue/35964 by appending import stack to error message.
+			if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 {
+				msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack)
+			}
+			pkg.Errors = append(pkg.Errors, Error{
+				Pos:  p.Error.Pos,
+				Msg:  msg,
+				Kind: ListError,
+			})
+		}
+
+		pkgs[pkg.ID] = pkg
+	}
+
+	// Attach bubbled-up errors collected above to their importing packages.
+	for id, errs := range additionalErrors {
+		if p, ok := pkgs[id]; ok {
+			p.Errors = append(p.Errors, errs...)
+		}
+	}
+	for _, pkg := range pkgs {
+		response.Packages = append(response.Packages, pkg)
+	}
+	// Sort for deterministic output (map iteration order is random).
+	sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })
+
+	return response, nil
+}
+
+// shouldAddFilenameFromError reports whether filenames parsed out of
+// p.Error should be attributed to p (part of the golang/go#39986
+// workaround in createDriverResponse). It returns false whenever p
+// already has Go files, or the go version cannot be determined.
+func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
+	if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 {
+		return false
+	}
+
+	goV, err := state.getGoVersion()
+	if err != nil {
+		return false
+	}
+
+	// On Go 1.14 and earlier, only add filenames from errors if the import stack is empty.
+	// The import stack behaves differently for these versions than newer Go versions.
+	if goV < 15 {
+		return len(p.Error.ImportStack) == 0
+	}
+
+	// On Go 1.15 and later, only parse filenames out of error if there's no import stack,
+	// or the current package is at the top of the import stack. This is not guaranteed
+	// to work perfectly, but should avoid some cases where files in errors don't belong to this
+	// package.
+	return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath
+}
+
+// getGoVersion returns the effective minor version of the go command
+// (the X in Go 1.X). The query runs at most once, guarded by goVersionOnce.
+func (state *golistState) getGoVersion() (int, error) {
+	state.goVersionOnce.Do(func() {
+		state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner)
+	})
+	return state.goVersion, state.goVersionError
+}
+
+// getPkgPath finds the package path of a directory if it's relative to a root
+// directory. The second result reports whether a containing root was found;
+// dir must be absolute.
+func (state *golistState) getPkgPath(dir string) (string, bool, error) {
+	if !filepath.IsAbs(dir) {
+		panic("non-absolute dir passed to getPkgPath")
+	}
+	roots, err := state.determineRootDirs()
+	if err != nil {
+		return "", false, err
+	}
+
+	for rdir, rpath := range roots {
+		// Make sure that the directory is in the module,
+		// to avoid creating a path relative to another module.
+		// NOTE(review): this is a plain string-prefix test, so a root
+		// "/a/b" also matches "/a/bc"; presumably roots are distinct
+		// enough in practice — confirm before relying on it elsewhere.
+		if !strings.HasPrefix(dir, rdir) {
+			continue
+		}
+		// TODO(matloob): This doesn't properly handle symlinks.
+		r, err := filepath.Rel(rdir, dir)
+		if err != nil {
+			continue
+		}
+		if rpath != "" {
+			// We choose only one root even though the directory even it can belong in multiple modules
+			// or GOPATH entries. This is okay because we only need to work with absolute dirs when a
+			// file is missing from disk, for instance when gopls calls go/packages in an overlay.
+			// Once the file is saved, gopls, or the next invocation of the tool will get the correct
+			// result straight from golist.
+			// TODO(matloob): Implement module tiebreaking?
+			return path.Join(rpath, filepath.ToSlash(r)), true, nil
+		}
+		return filepath.ToSlash(r), true, nil
+	}
+	return "", false, nil
+}
+
+// absJoin absolutizes and flattens the lists of files.
+// Relative entries are joined onto dir; absolute entries pass through
+// unchanged. The result preserves input order across all lists.
+func absJoin(dir string, fileses ...[]string) (res []string) {
+	for _, files := range fileses {
+		for _, file := range files {
+			if !filepath.IsAbs(file) {
+				file = filepath.Join(dir, file)
+			}
+			res = append(res, file)
+		}
+	}
+	return res
+}
+
+// jsonFlag computes the -json flag value for 'go list', requesting only the
+// output fields implied by cfg.Mode. Field filtering requires go1.19+; on
+// older toolchains the plain "-json" form (all fields) is returned.
+func jsonFlag(cfg *Config, goVersion int) string {
+	if goVersion < 19 {
+		return "-json"
+	}
+	var fields []string
+	added := make(map[string]bool)
+	// addFields appends each field once, preserving first-seen order.
+	addFields := func(fs ...string) {
+		for _, f := range fs {
+			if !added[f] {
+				added[f] = true
+				fields = append(fields, f)
+			}
+		}
+	}
+	addFields("Name", "ImportPath", "Error") // These fields are always needed
+	if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
+		addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles",
+			"CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles",
+			"SwigFiles", "SwigCXXFiles", "SysoFiles")
+		if cfg.Tests {
+			addFields("TestGoFiles", "XTestGoFiles")
+		}
+	}
+	if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
+		// CompiledGoFiles seems to be required for the test case TestCgoNoSyntax,
+		// even when -compiled isn't passed in.
+		// TODO(#52435): Should we make the test ask for -compiled, or automatically
+		// request CompiledGoFiles in certain circumstances?
+		addFields("Dir", "CompiledGoFiles")
+	}
+	if cfg.Mode&NeedCompiledGoFiles != 0 {
+		addFields("Dir", "CompiledGoFiles", "Export")
+	}
+	if cfg.Mode&NeedImports != 0 {
+		// When imports are requested, DepOnly is used to distinguish between packages
+		// explicitly requested and transitive imports of those packages.
+		addFields("DepOnly", "Imports", "ImportMap")
+		if cfg.Tests {
+			addFields("TestImports", "XTestImports")
+		}
+	}
+	if cfg.Mode&NeedDeps != 0 {
+		addFields("DepOnly")
+	}
+	if usesExportData(cfg) {
+		// Request Dir in the unlikely case Export is not absolute.
+		addFields("Dir", "Export")
+	}
+	if cfg.Mode&NeedForTest != 0 {
+		addFields("ForTest")
+	}
+	if cfg.Mode&needInternalDepsErrors != 0 {
+		addFields("DepsErrors")
+	}
+	if cfg.Mode&NeedModule != 0 {
+		addFields("Module")
+	}
+	if cfg.Mode&NeedEmbedFiles != 0 {
+		addFields("EmbedFiles")
+	}
+	if cfg.Mode&NeedEmbedPatterns != 0 {
+		addFields("EmbedPatterns")
+	}
+	if cfg.Mode&NeedTarget != 0 {
+		addFields("Target")
+	}
+	return "-json=" + strings.Join(fields, ",")
+}
+
+// golistargs assembles the full argument list for 'go list' from cfg, the
+// pattern words, and the go command's minor version. The returned slice
+// always ends with "--" followed by the patterns.
+func golistargs(cfg *Config, words []string, goVersion int) []string {
+	const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
+	fullargs := []string{
+		"-e", jsonFlag(cfg, goVersion),
+		fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0),
+		fmt.Sprintf("-test=%t", cfg.Tests),
+		fmt.Sprintf("-export=%t", usesExportData(cfg)),
+		fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0),
+		// go list doesn't let you pass -test and -find together,
+		// probably because you'd just get the TestMain.
+		fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)),
+	}
+
+	// golang/go#60456: with go1.21 and later, go list serves pgo variants, which
+	// can be costly to compute and may result in redundant processing for the
+	// caller. Disable these variants. If someone wants to add e.g. a NeedPGO
+	// mode flag, that should be a separate proposal.
+	if goVersion >= 21 {
+		fullargs = append(fullargs, "-pgo=off")
+	}
+
+	fullargs = append(fullargs, cfg.BuildFlags...)
+	fullargs = append(fullargs, "--")
+	fullargs = append(fullargs, words...)
+	return fullargs
+}
+
+// cfgInvocation returns an Invocation that reflects cfg's settings.
+// CleanEnv is set only when the caller supplied an explicit Env, so a nil
+// cfg.Env inherits the process environment.
+func (state *golistState) cfgInvocation() gocommand.Invocation {
+	cfg := state.cfg
+	return gocommand.Invocation{
+		BuildFlags: cfg.BuildFlags,
+		CleanEnv:   cfg.Env != nil,
+		Env:        cfg.Env,
+		Logf:       cfg.Logf,
+		WorkingDir: cfg.Dir,
+		Overlay:    state.overlay,
+	}
+}
+
+// invokeGo returns the stdout of a go command invocation.
+func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) {
+ cfg := state.cfg
+
+ inv := state.cfgInvocation()
+ inv.Verb = verb
+ inv.Args = args
+
+ stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv)
+ if err != nil {
+ // Check for 'go' executable not being found.
+ if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
+ return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound)
+ }
+
+ exitErr, ok := err.(*exec.ExitError)
+ if !ok {
+ // Catastrophic error:
+ // - context cancellation
+ return nil, fmt.Errorf("couldn't run 'go': %w", err)
+ }
+
+ // Old go version?
+ if strings.Contains(stderr.String(), "flag provided but not defined") {
+ return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
+ }
+
+ // Related to #24854
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") {
+ return nil, friendlyErr
+ }
+
+ // Return an error if 'go list' failed due to missing tools in
+ // $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606).
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) {
+ return nil, friendlyErr
+ }
+
+ // Is there an error running the C compiler in cgo? This will be reported in the "Error" field
+ // and should be suppressed by go list -e.
+ //
+ // This condition is not perfect yet because the error message can include other error messages than runtime/cgo.
+ isPkgPathRune := func(r rune) bool {
+ // From https://golang.org/ref/spec#Import_declarations:
+ // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings
+ // using only characters belonging to Unicode's L, M, N, P, and S general categories
+ // (the Graphic characters without spaces) and may also exclude the
+ // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD.
+ return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) &&
+ !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r)
+ }
+ // golang/go#36770: Handle case where cmd/go prints module download messages before the error.
+ msg := stderr.String()
+ for strings.HasPrefix(msg, "go: downloading") {
+ msg = msg[strings.IndexRune(msg, '\n')+1:]
+ }
+ if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") {
+ msg := msg[len("# "):]
+ if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") {
+ return stdout, nil
+ }
+ // Treat pkg-config errors as a special case (golang.org/issue/36770).
+ if strings.HasPrefix(msg, "pkg-config") {
+ return stdout, nil
+ }
+ }
+
+ // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show
+ // the error in the Err section of stdout in case -e option is provided.
+ // This fix is provided for backwards compatibility.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") {
+ output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Similar to the previous error, but currently lacks a fix in Go.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") {
+ output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath.
+ // If the package doesn't exist, put the absolute path of the directory into the error message,
+ // as Go 1.13 list does.
+ const noSuchDirectory = "no such directory"
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) {
+ errstr := stderr.String()
+ abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):])
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ abspath, strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
+ // Note that the error message we look for in this case is different that the one looked for above.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
+ output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a
+ // directory outside any module.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") {
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ // TODO(matloob): command-line-arguments isn't correct here.
+ "command-line-arguments", strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Another variation of the previous error
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") {
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ // TODO(matloob): command-line-arguments isn't correct here.
+ "command-line-arguments", strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit
+ // status if there's a dependency on a package that doesn't exist. But it should return
+ // a zero exit status and set an error on that package.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") {
+ // Don't clobber stdout if `go list` actually returned something.
+ if len(stdout.String()) > 0 {
+ return stdout, nil
+ }
+ // try to extract package name from string
+ stderrStr := stderr.String()
+ var importPath string
+ colon := strings.Index(stderrStr, ":")
+ if colon > 0 && strings.HasPrefix(stderrStr, "go build ") {
+ importPath = stderrStr[len("go build "):colon]
+ }
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ importPath, strings.Trim(stderrStr, "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Export mode entails a build.
+ // If that build fails, errors appear on stderr
+ // (despite the -e flag) and the Export field is blank.
+ // Do not fail in that case.
+ // The same is true if an ad-hoc package given to go list doesn't exist.
+ // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when
+ // packages don't exist or a build fails.
+ if !usesExportData(cfg) && !containsGoFile(args) {
+ return nil, friendlyErr
+ }
+ }
+ return stdout, nil
+}
+
+func containsGoFile(s []string) bool {
+ for _, f := range s {
+ if strings.HasSuffix(f, ".go") {
+ return true
+ }
+ }
+ return false
+}
+
+func cmdDebugStr(cmd *exec.Cmd) string {
+ env := make(map[string]string)
+ for _, kv := range cmd.Env {
+ split := strings.SplitN(kv, "=", 2)
+ k, v := split[0], split[1]
+ env[k] = v
+ }
+
+ var args []string
+ for _, arg := range cmd.Args {
+ quoted := strconv.Quote(arg)
+ if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") {
+ args = append(args, quoted)
+ } else {
+ args = append(args, arg)
+ }
+ }
+ return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
+}
+
+// getSizesForArgs queries 'go list' for the appropriate
+// Compiler and GOARCH arguments to pass to [types.SizesFor].
+func getSizesForArgs(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) {
+ inv.Verb = "list"
+ inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}
+ stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
+ var goarch, compiler string
+ if rawErr != nil {
+ rawErrMsg := rawErr.Error()
+ if strings.Contains(rawErrMsg, "cannot find main module") ||
+ strings.Contains(rawErrMsg, "go.mod file not found") {
+ // User's running outside of a module.
+ // All bets are off. Get GOARCH and guess compiler is gc.
+ // TODO(matloob): Is this a problem in practice?
+ inv.Verb = "env"
+ inv.Args = []string{"GOARCH"}
+ envout, enverr := gocmdRunner.Run(ctx, inv)
+ if enverr != nil {
+ return "", "", enverr
+ }
+ goarch = strings.TrimSpace(envout.String())
+ compiler = "gc"
+ } else if friendlyErr != nil {
+ return "", "", friendlyErr
+ } else {
+ // This should be unreachable, but be defensive
+ // in case RunRaw's error results are inconsistent.
+ return "", "", rawErr
+ }
+ } else {
+ fields := strings.Fields(stdout.String())
+ if len(fields) < 2 {
+ return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>",
+ stdout.String(), stderr.String())
+ }
+ goarch = fields[0]
+ compiler = fields[1]
+ }
+ return compiler, goarch, nil
+}
diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
new file mode 100644
index 0000000000..d9d5a45cd4
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
@@ -0,0 +1,83 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "encoding/json"
+ "path/filepath"
+
+ "golang.org/x/tools/internal/gocommand"
+)
+
+// determineRootDirs returns a mapping from absolute directories that could
+// contain code to their corresponding import path prefixes.
+func (state *golistState) determineRootDirs() (map[string]string, error) {
+ env, err := state.getEnv()
+ if err != nil {
+ return nil, err
+ }
+ if env["GOMOD"] != "" {
+ state.rootsOnce.Do(func() {
+ state.rootDirs, state.rootDirsError = state.determineRootDirsModules()
+ })
+ } else {
+ state.rootsOnce.Do(func() {
+ state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH()
+ })
+ }
+ return state.rootDirs, state.rootDirsError
+}
+
+func (state *golistState) determineRootDirsModules() (map[string]string, error) {
+ // List all of the modules--the first will be the directory for the main
+ // module. Any replaced modules will also need to be treated as roots.
+ // Editing files in the module cache isn't a great idea, so we don't
+ // plan to ever support that.
+ out, err := state.invokeGo("list", "-m", "-json", "all")
+ if err != nil {
+ // 'go list all' will fail if we're outside of a module and
+ // GO111MODULE=on. Try falling back without 'all'.
+ var innerErr error
+ out, innerErr = state.invokeGo("list", "-m", "-json")
+ if innerErr != nil {
+ return nil, err
+ }
+ }
+ roots := map[string]string{}
+ modules := map[string]string{}
+ var i int
+ for dec := json.NewDecoder(out); dec.More(); {
+ mod := new(gocommand.ModuleJSON)
+ if err := dec.Decode(mod); err != nil {
+ return nil, err
+ }
+ if mod.Dir != "" && mod.Path != "" {
+ // This is a valid module; add it to the map.
+ absDir, err := state.cfg.abs(mod.Dir)
+ if err != nil {
+ return nil, err
+ }
+ modules[absDir] = mod.Path
+ // The first result is the main module.
+ if i == 0 || mod.Replace != nil && mod.Replace.Path != "" {
+ roots[absDir] = mod.Path
+ }
+ }
+ i++
+ }
+ return roots, nil
+}
+
+func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) {
+ m := map[string]string{}
+ for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) {
+ absDir, err := filepath.Abs(dir)
+ if err != nil {
+ return nil, err
+ }
+ m[filepath.Join(absDir, "src")] = ""
+ }
+ return m, nil
+}
diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
new file mode 100644
index 0000000000..69eec9f44d
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
@@ -0,0 +1,56 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "fmt"
+ "strings"
+)
+
+var modes = [...]struct {
+ mode LoadMode
+ name string
+}{
+ {NeedName, "NeedName"},
+ {NeedFiles, "NeedFiles"},
+ {NeedCompiledGoFiles, "NeedCompiledGoFiles"},
+ {NeedImports, "NeedImports"},
+ {NeedDeps, "NeedDeps"},
+ {NeedExportFile, "NeedExportFile"},
+ {NeedTypes, "NeedTypes"},
+ {NeedSyntax, "NeedSyntax"},
+ {NeedTypesInfo, "NeedTypesInfo"},
+ {NeedTypesSizes, "NeedTypesSizes"},
+ {NeedForTest, "NeedForTest"},
+ {NeedModule, "NeedModule"},
+ {NeedEmbedFiles, "NeedEmbedFiles"},
+ {NeedEmbedPatterns, "NeedEmbedPatterns"},
+ {NeedTarget, "NeedTarget"},
+}
+
+func (mode LoadMode) String() string {
+ if mode == 0 {
+ return "LoadMode(0)"
+ }
+ var out []string
+ // named bits
+ for _, item := range modes {
+ if (mode & item.mode) != 0 {
+ mode ^= item.mode
+ out = append(out, item.name)
+ }
+ }
+ // unnamed residue
+ if mode != 0 {
+ if out == nil {
+ return fmt.Sprintf("LoadMode(%#x)", int(mode))
+ }
+ out = append(out, fmt.Sprintf("%#x", int(mode)))
+ }
+ if len(out) == 1 {
+ return out[0]
+ }
+ return "(" + strings.Join(out, "|") + ")"
+}
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
new file mode 100644
index 0000000000..060ab08efb
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -0,0 +1,1559 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+// See doc.go for package documentation and implementation notes.
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "go/types"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/sync/errgroup"
+
+ "golang.org/x/tools/go/gcexportdata"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/packagesinternal"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+// A LoadMode controls the amount of detail to return when loading.
+// The bits below can be combined to specify which fields should be
+// filled in the result packages.
+//
+// The zero value is a special case, equivalent to combining
+// the NeedName, NeedFiles, and NeedCompiledGoFiles bits.
+//
+// ID and Errors (if present) will always be filled.
+// [Load] may return more information than requested.
+//
+// The Mode flag is a union of several bits named NeedName,
+// NeedFiles, and so on, each of which determines whether
+// a given field of Package (Name, Files, etc) should be
+// populated.
+//
+// For convenience, we provide named constants for the most
+// common combinations of Need flags:
+//
+// [LoadFiles] lists of files in each package
+// [LoadImports] ... plus imports
+// [LoadTypes] ... plus type information
+// [LoadSyntax] ... plus type-annotated syntax
+// [LoadAllSyntax] ... for all dependencies
+//
+// Unfortunately there are a number of open bugs related to
+// interactions among the LoadMode bits:
+// - https://go.dev/issue/56633
+// - https://go.dev/issue/56677
+// - https://go.dev/issue/58726
+// - https://go.dev/issue/63517
+type LoadMode int
+
+const (
+ // NeedName adds Name and PkgPath.
+ NeedName LoadMode = 1 << iota
+
+ // NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles
+ NeedFiles
+
+ // NeedCompiledGoFiles adds CompiledGoFiles.
+ NeedCompiledGoFiles
+
+ // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain
+ // "placeholder" Packages with only the ID set.
+ NeedImports
+
+ // NeedDeps adds the fields requested by the LoadMode in the packages in Imports.
+ NeedDeps
+
+ // NeedExportFile adds ExportFile.
+ NeedExportFile
+
+ // NeedTypes adds Types, Fset, and IllTyped.
+ NeedTypes
+
+ // NeedSyntax adds Syntax and Fset.
+ NeedSyntax
+
+ // NeedTypesInfo adds TypesInfo and Fset.
+ NeedTypesInfo
+
+ // NeedTypesSizes adds TypesSizes.
+ NeedTypesSizes
+
+ // needInternalDepsErrors adds the internal deps errors field for use by gopls.
+ needInternalDepsErrors
+
+ // NeedForTest adds ForTest.
+ //
+ // Tests must also be set on the context for this field to be populated.
+ NeedForTest
+
+ // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
+ // Modifies CompiledGoFiles and Types, and has no effect on its own.
+ typecheckCgo
+
+ // NeedModule adds Module.
+ NeedModule
+
+ // NeedEmbedFiles adds EmbedFiles.
+ NeedEmbedFiles
+
+ // NeedEmbedPatterns adds EmbedPatterns.
+ NeedEmbedPatterns
+
+ // NeedTarget adds Target.
+ NeedTarget
+
+ // Be sure to update loadmode_string.go when adding new items!
+)
+
+const (
+ // LoadFiles loads the name and file names for the initial packages.
+ LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
+
+ // LoadImports loads the name, file names, and import mapping for the initial packages.
+ LoadImports = LoadFiles | NeedImports
+
+ // LoadTypes loads exported type information for the initial packages.
+ LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
+
+ // LoadSyntax loads typed syntax for the initial packages.
+ LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
+
+ // LoadAllSyntax loads typed syntax for the initial packages and all dependencies.
+ LoadAllSyntax = LoadSyntax | NeedDeps
+
+ // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile.
+ //
+ //go:fix inline
+ NeedExportsFile = NeedExportFile
+)
+
+// A Config specifies details about how packages should be loaded.
+// The zero value is a valid configuration.
+//
+// Calls to [Load] do not modify this struct.
+type Config struct {
+ // Mode controls the level of information returned for each package.
+ Mode LoadMode
+
+ // Context specifies the context for the load operation.
+ // Cancelling the context may cause [Load] to abort and
+ // return an error.
+ Context context.Context
+
+ // Logf is the logger for the config.
+ // If the user provides a logger, debug logging is enabled.
+ // If the GOPACKAGESDEBUG environment variable is set to true,
+ // but the logger is nil, default to log.Printf.
+ Logf func(format string, args ...any)
+
+ // Dir is the directory in which to run the build system's query tool
+ // that provides information about the packages.
+ // If Dir is empty, the tool is run in the current directory.
+ Dir string
+
+ // Env is the environment to use when invoking the build system's query tool.
+ // If Env is nil, the current environment is used.
+ // As in os/exec's Cmd, only the last value in the slice for
+ // each environment key is used. To specify the setting of only
+ // a few variables, append to the current environment, as in:
+ //
+ // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386")
+ //
+ Env []string
+
+ // BuildFlags is a list of command-line flags to be passed through to
+ // the build system's query tool.
+ BuildFlags []string
+
+ // Fset provides source position information for syntax trees and types.
+ // If Fset is nil, Load will use a new fileset, but preserve Fset's value.
+ Fset *token.FileSet
+
+ // ParseFile is called to read and parse each file
+ // when preparing a package's type-checked syntax tree.
+ // It must be safe to call ParseFile simultaneously from multiple goroutines.
+ // If ParseFile is nil, the loader will uses parser.ParseFile.
+ //
+ // ParseFile should parse the source from src and use filename only for
+ // recording position information.
+ //
+ // An application may supply a custom implementation of ParseFile
+ // to change the effective file contents or the behavior of the parser,
+ // or to modify the syntax tree. For example, selectively eliminating
+ // unwanted function bodies can significantly accelerate type checking.
+ ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error)
+
+ // If Tests is set, the loader includes not just the packages
+ // matching a particular pattern but also any related test packages,
+ // including test-only variants of the package and the test executable.
+ //
+ // For example, when using the go command, loading "fmt" with Tests=true
+ // returns four packages, with IDs "fmt" (the standard package),
+ // "fmt [fmt.test]" (the package as compiled for the test),
+ // "fmt_test" (the test functions from source files in package fmt_test),
+ // and "fmt.test" (the test binary).
+ //
+ // In build systems with explicit names for tests,
+ // setting Tests may have no effect.
+ Tests bool
+
+ // Overlay is a mapping from absolute file paths to file contents.
+ //
+ // For each map entry, [Load] uses the alternative file
+ // contents provided by the overlay mapping instead of reading
+ // from the file system. This mechanism can be used to enable
+ // editor-integrated tools to correctly analyze the contents
+ // of modified but unsaved buffers, for example.
+ //
+ // The overlay mapping is passed to the build system's driver
+ // (see "The driver protocol") so that it too can report
+ // consistent package metadata about unsaved files. However,
+ // drivers may vary in their level of support for overlays.
+ Overlay map[string][]byte
+}
+
+// Load loads and returns the Go packages named by the given patterns.
+//
+// The cfg parameter specifies loading options; nil behaves the same as an empty [Config].
+//
+// The [Config.Mode] field is a set of bits that determine what kinds
+// of information should be computed and returned. Modes that require
+// more information tend to be slower. See [LoadMode] for details
+// and important caveats. Its zero value is equivalent to
+// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles].
+//
+// Each call to Load returns a new set of [Package] instances.
+// The Packages and their Imports form a directed acyclic graph.
+//
+// If the [NeedTypes] mode flag was set, each call to Load uses a new
+// [types.Importer], so [types.Object] and [types.Type] values from
+// different calls to Load must not be mixed as they will have
+// inconsistent notions of type identity.
+//
+// If any of the patterns was invalid as defined by the
+// underlying build system, Load returns an error.
+// It may return an empty list of packages without an error,
+// for instance for an empty expansion of a valid wildcard.
+// Errors associated with a particular package are recorded in the
+// corresponding Package's Errors list, and do not cause Load to
+// return an error. Clients may need to handle such errors before
+// proceeding with further analysis. The [PrintErrors] function is
+// provided for convenient display of all errors.
+func Load(cfg *Config, patterns ...string) ([]*Package, error) {
+ ld := newLoader(cfg)
+ response, external, err := defaultDriver(&ld.Config, patterns...)
+ if err != nil {
+ return nil, err
+ }
+
+ ld.sizes = types.SizesFor(response.Compiler, response.Arch)
+ if ld.sizes == nil && ld.Config.Mode&(NeedTypes|NeedTypesSizes|NeedTypesInfo) != 0 {
+ // Type size information is needed but unavailable.
+ if external {
+ // An external driver may fail to populate the Compiler/GOARCH fields,
+ // especially since they are relatively new (see #63700).
+ // Provide a sensible fallback in this case.
+ ld.sizes = types.SizesFor("gc", runtime.GOARCH)
+ if ld.sizes == nil { // gccgo-only arch
+ ld.sizes = types.SizesFor("gc", "amd64")
+ }
+ } else {
+ // Go list should never fail to deliver accurate size information.
+ // Reject the whole Load since the error is the same for every package.
+ return nil, fmt.Errorf("can't determine type sizes for compiler %q on GOARCH %q",
+ response.Compiler, response.Arch)
+ }
+ }
+
+ return ld.refine(response)
+}
+
+// defaultDriver is a driver that implements go/packages' fallback behavior.
+// It will try to request to an external driver, if one exists. If there's
+// no external driver, or the driver returns a response with NotHandled set,
+// defaultDriver will fall back to the go list driver.
+// The boolean result indicates that an external driver handled the request.
+func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) {
+ const (
+ // windowsArgMax specifies the maximum command line length for
+ // the Windows' CreateProcess function.
+ windowsArgMax = 32767
+ // maxEnvSize is a very rough estimation of the maximum environment
+ // size of a user.
+ maxEnvSize = 16384
+ // safeArgMax specifies the maximum safe command line length to use
+ // by the underlying driver excl. the environment. We choose the Windows'
+ // ARG_MAX as the starting point because it's one of the lowest ARG_MAX
+ // constants out of the different supported platforms,
+ // e.g., https://www.in-ulm.de/~mascheck/various/argmax/#results.
+ safeArgMax = windowsArgMax - maxEnvSize
+ )
+ chunks, err := splitIntoChunks(patterns, safeArgMax)
+ if err != nil {
+ return nil, false, err
+ }
+
+ if driver := findExternalDriver(cfg); driver != nil {
+ response, err := callDriverOnChunks(driver, cfg, chunks)
+ if err != nil {
+ return nil, false, err
+ } else if !response.NotHandled {
+ return response, true, nil
+ }
+ // not handled: fall through
+ }
+
+ // go list fallback
+
+ // Write overlays once, as there are many calls
+ // to 'go list' (one per chunk plus others too).
+ overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
+ if err != nil {
+ return nil, false, err
+ }
+ defer cleanupOverlay()
+
+ var runner gocommand.Runner // (shared across many 'go list' calls)
+ driver := func(cfg *Config, patterns []string) (*DriverResponse, error) {
+ return goListDriver(cfg, &runner, overlayFile, patterns)
+ }
+ response, err := callDriverOnChunks(driver, cfg, chunks)
+ if err != nil {
+ return nil, false, err
+ }
+ return response, false, err
+}
+
+// splitIntoChunks chunks the slice so that the total number of characters
+// in a chunk is no longer than argMax.
+func splitIntoChunks(patterns []string, argMax int) ([][]string, error) {
+ if argMax <= 0 {
+ return nil, errors.New("failed to split patterns into chunks, negative safe argMax value")
+ }
+ var chunks [][]string
+ charsInChunk := 0
+ nextChunkStart := 0
+ for i, v := range patterns {
+ vChars := len(v)
+ if vChars > argMax {
+ // a single pattern is longer than the maximum safe ARG_MAX, hardly should happen
+ return nil, errors.New("failed to split patterns into chunks, a pattern is too long")
+ }
+ charsInChunk += vChars + 1 // +1 is for a whitespace between patterns that has to be counted too
+ if charsInChunk > argMax {
+ chunks = append(chunks, patterns[nextChunkStart:i])
+ nextChunkStart = i
+ charsInChunk = vChars
+ }
+ }
+ // add the last chunk
+ if nextChunkStart < len(patterns) {
+ chunks = append(chunks, patterns[nextChunkStart:])
+ }
+ return chunks, nil
+}
+
+func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) {
+ if len(chunks) == 0 {
+ return driver(cfg, nil)
+ }
+ responses := make([]*DriverResponse, len(chunks))
+ errNotHandled := errors.New("driver returned NotHandled")
+ var g errgroup.Group
+ for i, chunk := range chunks {
+ g.Go(func() (err error) {
+ responses[i], err = driver(cfg, chunk)
+ if responses[i] != nil && responses[i].NotHandled {
+ err = errNotHandled
+ }
+ return err
+ })
+ }
+ if err := g.Wait(); err != nil {
+ if errors.Is(err, errNotHandled) {
+ return &DriverResponse{NotHandled: true}, nil
+ }
+ return nil, err
+ }
+ return mergeResponses(responses...), nil
+}
+
+func mergeResponses(responses ...*DriverResponse) *DriverResponse {
+ if len(responses) == 0 {
+ return nil
+ }
+ response := newDeduper()
+ response.dr.NotHandled = false
+ response.dr.Compiler = responses[0].Compiler
+ response.dr.Arch = responses[0].Arch
+ response.dr.GoVersion = responses[0].GoVersion
+ for _, v := range responses {
+ response.addAll(v)
+ }
+ return response.dr
+}
+
+// A Package describes a loaded Go package.
+//
+// It also defines part of the JSON schema of [DriverResponse].
+// See the package documentation for an overview.
+type Package struct {
+ // ID is a unique identifier for a package,
+ // in a syntax provided by the underlying build system.
+ //
+ // Because the syntax varies based on the build system,
+ // clients should treat IDs as opaque and not attempt to
+ // interpret them.
+ ID string
+
+ // Name is the package name as it appears in the package source code.
+ Name string
+
+ // PkgPath is the package path as used by the go/types package.
+ PkgPath string
+
+ // Dir is the directory associated with the package, if it exists.
+ //
+ // For packages listed by the go command, this is the directory containing
+ // the package files.
+ Dir string
+
+ // Errors contains any errors encountered querying the metadata
+ // of the package, or while parsing or type-checking its files.
+ Errors []Error
+
+ // TypeErrors contains the subset of errors produced during type checking.
+ TypeErrors []types.Error
+
+ // GoFiles lists the absolute file paths of the package's Go source files.
+ // It may include files that should not be compiled, for example because
+ // they contain non-matching build tags, are documentary pseudo-files such as
+ // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing.
+ GoFiles []string
+
+ // CompiledGoFiles lists the absolute file paths of the package's source
+ // files that are suitable for type checking.
+ // This may differ from GoFiles if files are processed before compilation.
+ CompiledGoFiles []string
+
+ // OtherFiles lists the absolute file paths of the package's non-Go source files,
+ // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on.
+ OtherFiles []string
+
+ // EmbedFiles lists the absolute file paths of the package's files
+ // embedded with go:embed.
+ EmbedFiles []string
+
+ // EmbedPatterns lists the absolute file patterns of the package's
+ // files embedded with go:embed.
+ EmbedPatterns []string
+
+ // IgnoredFiles lists source files that are not part of the package
+ // using the current build configuration but that might be part of
+ // the package using other build configurations.
+ IgnoredFiles []string
+
+ // ExportFile is the absolute path to a file containing type
+ // information for the package as provided by the build system.
+ ExportFile string
+
+ // Target is the absolute install path of the .a file, for libraries,
+ // and of the executable file, for binaries.
+ Target string
+
+ // Imports maps import paths appearing in the package's Go source files
+ // to corresponding loaded Packages.
+ Imports map[string]*Package
+
+ // Module is the module information for the package if it exists.
+ //
+ // Note: it may be missing for std and cmd; see Go issue #65816.
+ Module *Module
+
+ // -- The following fields are not part of the driver JSON schema. --
+
+ // Types provides type information for the package.
+ // The NeedTypes LoadMode bit sets this field for packages matching the
+ // patterns; type information for dependencies may be missing or incomplete,
+ // unless NeedDeps and NeedImports are also set.
+ //
+ // Each call to [Load] returns a consistent set of type
+ // symbols, as defined by the comment at [types.Identical].
+ // Avoid mixing type information from two or more calls to [Load].
+ Types *types.Package `json:"-"`
+
+ // Fset provides position information for Types, TypesInfo, and Syntax.
+ // It is set only when Types is set.
+ Fset *token.FileSet `json:"-"`
+
+ // IllTyped indicates whether the package or any dependency contains errors.
+ // It is set only when Types is set.
+ IllTyped bool `json:"-"`
+
+ // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles.
+ //
+ // The NeedSyntax LoadMode bit populates this field for packages matching the patterns.
+ // If NeedDeps and NeedImports are also set, this field will also be populated
+ // for dependencies.
+ //
+ // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are
+ // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles.
+ Syntax []*ast.File `json:"-"`
+
+ // TypesInfo provides type information about the package's syntax trees.
+ // It is set only when Syntax is set.
+ TypesInfo *types.Info `json:"-"`
+
+ // TypesSizes provides the effective size function for types in TypesInfo.
+ TypesSizes types.Sizes `json:"-"`
+
+ // -- internal --
+
+ // ForTest is the package under test, if any.
+ ForTest string
+
+ // depsErrors is the DepsErrors field from the go list response, if any.
+ depsErrors []*packagesinternal.PackageError
+}
+
+// Module provides module information for a package.
+//
+// It also defines part of the JSON schema of [DriverResponse].
+// See the package documentation for an overview.
+type Module struct {
+ Path string // module path
+ Version string // module version
+ Replace *Module // replaced by this module
+ Time *time.Time // time version was created
+ Main bool // is this the main module?
+ Indirect bool // is this module only an indirect dependency of main module?
+ Dir string // directory holding files for this module, if any
+ GoMod string // path to go.mod file used when loading this module, if any
+ GoVersion string // go version used in module
+ Error *ModuleError // error loading module
+}
+
+// ModuleError holds errors loading a module.
+type ModuleError struct {
+ Err string // the error itself
+}
+
+func init() {
+ packagesinternal.GetDepsErrors = func(p any) []*packagesinternal.PackageError {
+ return p.(*Package).depsErrors
+ }
+ packagesinternal.TypecheckCgo = int(typecheckCgo)
+ packagesinternal.DepsErrors = int(needInternalDepsErrors)
+}
+
+// An Error describes a problem with a package's metadata, syntax, or types.
+type Error struct {
+ Pos string // "file:line:col" or "file:line" or "" or "-"
+ Msg string
+ Kind ErrorKind
+}
+
+// ErrorKind describes the source of the error, allowing the user to
+// differentiate between errors generated by the driver, the parser, or the
+// type-checker.
+type ErrorKind int
+
+const (
+ UnknownError ErrorKind = iota
+ ListError
+ ParseError
+ TypeError
+)
+
+func (err Error) Error() string {
+ pos := err.Pos
+ if pos == "" {
+ pos = "-" // like token.Position{}.String()
+ }
+ return pos + ": " + err.Msg
+}
+
+// flatPackage is the JSON form of Package
+// It drops all the type and syntax fields, and transforms the Imports
+//
+// TODO(adonovan): identify this struct with Package, effectively
+// publishing the JSON protocol.
+type flatPackage struct {
+ ID string
+ Name string `json:",omitempty"`
+ PkgPath string `json:",omitempty"`
+ Errors []Error `json:",omitempty"`
+ GoFiles []string `json:",omitempty"`
+ CompiledGoFiles []string `json:",omitempty"`
+ OtherFiles []string `json:",omitempty"`
+ EmbedFiles []string `json:",omitempty"`
+ EmbedPatterns []string `json:",omitempty"`
+ IgnoredFiles []string `json:",omitempty"`
+ ExportFile string `json:",omitempty"`
+ Imports map[string]string `json:",omitempty"`
+}
+
+// MarshalJSON returns the Package in its JSON form.
+// For the most part, the structure fields are written out unmodified, and
+// the type and syntax fields are skipped.
+// The imports are written out as just a map of path to package id.
+// The errors are written using a custom type that tries to preserve the
+// structure of error types we know about.
+//
+// This method exists to enable support for additional build systems. It is
+// not intended for use by clients of the API and we may change the format.
+func (p *Package) MarshalJSON() ([]byte, error) {
+	flat := &flatPackage{
+		ID:              p.ID,
+		Name:            p.Name,
+		PkgPath:         p.PkgPath,
+		Errors:          p.Errors,
+		GoFiles:         p.GoFiles,
+		CompiledGoFiles: p.CompiledGoFiles,
+		OtherFiles:      p.OtherFiles,
+		EmbedFiles:      p.EmbedFiles,
+		EmbedPatterns:   p.EmbedPatterns,
+		IgnoredFiles:    p.IgnoredFiles,
+		ExportFile:      p.ExportFile,
+	}
+	if len(p.Imports) > 0 {
+		// Flatten each imported package to just its ID.
+		flat.Imports = make(map[string]string, len(p.Imports))
+		for path, ipkg := range p.Imports {
+			flat.Imports[path] = ipkg.ID
+		}
+	}
+	return json.Marshal(flat)
+}
+
+// UnmarshalJSON reads in a Package from its JSON format.
+// See MarshalJSON for details about the format accepted.
+func (p *Package) UnmarshalJSON(b []byte) error {
+	flat := &flatPackage{}
+	if err := json.Unmarshal(b, &flat); err != nil {
+		return err
+	}
+	*p = Package{
+		ID:              flat.ID,
+		Name:            flat.Name,
+		PkgPath:         flat.PkgPath,
+		Errors:          flat.Errors,
+		GoFiles:         flat.GoFiles,
+		CompiledGoFiles: flat.CompiledGoFiles,
+		OtherFiles:      flat.OtherFiles,
+		EmbedFiles:      flat.EmbedFiles,
+		EmbedPatterns:   flat.EmbedPatterns,
+		IgnoredFiles:    flat.IgnoredFiles,
+		ExportFile:      flat.ExportFile,
+	}
+	if len(flat.Imports) > 0 {
+		// Imports are restored as stub Packages carrying only the ID;
+		// the loader later replaces each stub with the full package.
+		p.Imports = make(map[string]*Package, len(flat.Imports))
+		for path, id := range flat.Imports {
+			p.Imports[path] = &Package{ID: id}
+		}
+	}
+	return nil
+}
+
+// String returns the package's ID, a concise form suitable for debugging.
+func (p *Package) String() string { return p.ID }
+
+// loaderPackage augments Package with state used during the loading phase.
+// It is discarded once loading completes; only the embedded Package is
+// returned to the caller.
+type loaderPackage struct {
+	*Package
+	importErrors    map[string]error // maps each bad import to its error
+	preds           []*loaderPackage // packages that import this one
+	unfinishedSuccs atomic.Int32     // number of direct imports not yet loaded
+	color           uint8            // for cycle detection
+	needsrc         bool             // load from source (Mode >= LoadTypes)
+	needtypes       bool             // type information is either requested or depended on
+	initial         bool             // package was matched by a pattern
+	goVersion       int              // minor version number of go command on PATH
+}
+
+// loader holds the working state of a single call to load.
+type loader struct {
+	pkgs map[string]*loaderPackage // keyed by Package.ID
+	Config
+	sizes        types.Sizes // non-nil if needed by mode
+	parseCache   map[string]*parseValue
+	parseCacheMu sync.Mutex
+	exportMu     sync.Mutex // enforces mutual exclusion of exportdata operations
+
+	// Config.Mode contains the implied mode (see impliedLoadMode).
+	// Implied mode contains all the fields we need the data for.
+	// In requestedMode there are the actually requested fields.
+	// We'll zero them out before returning packages to the user.
+	// This makes it easier for us to get the conditions where
+	// we need certain modes right.
+	requestedMode LoadMode
+}
+
+// parseValue is an entry in the loader's parse cache. The ready channel
+// is closed once f and err are populated, so concurrent requests for the
+// same file block until the first parse completes.
+type parseValue struct {
+	f     *ast.File
+	err   error
+	ready chan struct{}
+}
+
+// newLoader returns a loader for the given configuration, filling in
+// defaults (mode, logger, environment, context, working directory, Fset
+// and ParseFile hooks) for any fields the caller left unset.
+// A nil cfg is treated as the zero Config.
+func newLoader(cfg *Config) *loader {
+	ld := &loader{
+		parseCache: map[string]*parseValue{},
+	}
+	if cfg != nil {
+		ld.Config = *cfg
+		// If the user has provided a logger, use it.
+		ld.Config.Logf = cfg.Logf
+	}
+	if ld.Config.Logf == nil {
+		// If the GOPACKAGESDEBUG environment variable is set to true,
+		// but the user has not provided a logger, default to log.Printf.
+		if debug {
+			ld.Config.Logf = log.Printf
+		} else {
+			ld.Config.Logf = func(format string, args ...any) {}
+		}
+	}
+	if ld.Config.Mode == 0 {
+		ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility.
+	}
+	if ld.Config.Env == nil {
+		ld.Config.Env = os.Environ()
+	}
+	if ld.Context == nil {
+		ld.Context = context.Background()
+	}
+	if ld.Dir == "" {
+		if dir, err := os.Getwd(); err == nil {
+			ld.Dir = dir
+		}
+	}
+
+	// Save the actually requested fields. We'll zero them out before returning packages to the user.
+	ld.requestedMode = ld.Mode
+	ld.Mode = impliedLoadMode(ld.Mode)
+
+	if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
+		if ld.Fset == nil {
+			ld.Fset = token.NewFileSet()
+		}
+
+		// ParseFile is required even in LoadTypes mode
+		// because we load source if export data is missing.
+		if ld.ParseFile == nil {
+			ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+				// We implicitly promise to keep doing ast.Object resolution. :(
+				const mode = parser.AllErrors | parser.ParseComments
+				return parser.ParseFile(fset, filename, src, mode)
+			}
+		}
+	}
+
+	return ld
+}
+
+// refine connects the supplied packages into a graph and then adds type
+// and syntax information as requested by the LoadMode.
+func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
+	roots := response.Roots
+	rootMap := make(map[string]int, len(roots))
+	for i, root := range roots {
+		rootMap[root] = i
+	}
+	ld.pkgs = make(map[string]*loaderPackage)
+	// first pass, fixup and build the map and roots
+	var initial = make([]*loaderPackage, len(roots))
+	for _, pkg := range response.Packages {
+		rootIndex := -1
+		if i, found := rootMap[pkg.ID]; found {
+			rootIndex = i
+		}
+
+		// Overlays can invalidate export data.
+		// TODO(matloob): make this check fine-grained based on dependencies on overlaid files
+		exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
+		// This package needs type information if the caller requested types and the package is
+		// either a root, or it's a non-root and the user requested dependencies ...
+		needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
+		// This package needs source if the call requested source (or types info, which implies source)
+		// and the package is either a root, or it's a non-root and the user requested dependencies...
+		needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
+			// ... or if we need types and the exportData is invalid. We fall back to (incompletely)
+			// typechecking packages from source if they fail to compile.
+			(ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe"
+		lpkg := &loaderPackage{
+			Package:   pkg,
+			needtypes: needtypes,
+			needsrc:   needsrc,
+			goVersion: response.GoVersion,
+		}
+		ld.pkgs[lpkg.ID] = lpkg
+		if rootIndex >= 0 {
+			initial[rootIndex] = lpkg
+			lpkg.initial = true
+		}
+	}
+	for i, root := range roots {
+		if initial[i] == nil {
+			return nil, fmt.Errorf("root package %v is missing", root)
+		}
+	}
+
+	// Materialize the import graph if it is needed (NeedImports),
+	// or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}).
+	var leaves []*loaderPackage // packages with no unfinished successors
+	if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
+		const (
+			white = 0 // new
+			grey  = 1 // in progress
+			black = 2 // complete
+		)
+
+		// visit traverses the import graph, depth-first,
+		// and materializes the graph as Packages.Imports.
+		//
+		// Valid imports are saved in the Packages.Import map.
+		// Invalid imports (cycles and missing nodes) are saved in the importErrors map.
+		// Thus, even in the presence of both kinds of errors,
+		// the Import graph remains a DAG.
+		//
+		// visit returns whether the package needs src or has a transitive
+		// dependency on a package that does. These are the only packages
+		// for which we load source code.
+		var stack []*loaderPackage
+		var visit func(from, lpkg *loaderPackage) bool
+		visit = func(from, lpkg *loaderPackage) bool {
+			if lpkg.color == grey {
+				panic("internal error: grey node")
+			}
+			if lpkg.color == white {
+				lpkg.color = grey
+				stack = append(stack, lpkg) // push
+				stubs := lpkg.Imports       // the structure form has only stubs with the ID in the Imports
+				lpkg.Imports = make(map[string]*Package, len(stubs))
+				for importPath, ipkg := range stubs {
+					var importErr error
+					imp := ld.pkgs[ipkg.ID]
+					if imp == nil {
+						// (includes package "C" when DisableCgo)
+						importErr = fmt.Errorf("missing package: %q", ipkg.ID)
+					} else if imp.color == grey {
+						importErr = fmt.Errorf("import cycle: %s", stack)
+					}
+					if importErr != nil {
+						if lpkg.importErrors == nil {
+							lpkg.importErrors = make(map[string]error)
+						}
+						lpkg.importErrors[importPath] = importErr
+						continue
+					}
+
+					if visit(lpkg, imp) {
+						lpkg.needsrc = true
+					}
+					lpkg.Imports[importPath] = imp.Package
+				}
+
+				// -- postorder --
+
+				// Complete type information is required for the
+				// immediate dependencies of each source package.
+				if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
+					for _, ipkg := range lpkg.Imports {
+						ld.pkgs[ipkg.ID].needtypes = true
+					}
+				}
+
+				// NeedTypeSizes causes TypeSizes to be set even
+				// on packages for which types aren't needed.
+				if ld.Mode&NeedTypesSizes != 0 {
+					lpkg.TypesSizes = ld.sizes
+				}
+
+				// Add packages with no imports directly to the queue of leaves.
+				if len(lpkg.Imports) == 0 {
+					leaves = append(leaves, lpkg)
+				}
+
+				stack = stack[:len(stack)-1] // pop
+				lpkg.color = black
+			}
+
+			// Add edge from predecessor.
+			if from != nil {
+				from.unfinishedSuccs.Add(+1) // incref
+				lpkg.preds = append(lpkg.preds, from)
+			}
+
+			return lpkg.needsrc
+		}
+
+		// For each initial package, create its import DAG.
+		for _, lpkg := range initial {
+			visit(nil, lpkg)
+		}
+
+	} else {
+		// !NeedImports: drop the stub (ID-only) import packages
+		// that we are not even going to try to resolve.
+		for _, lpkg := range initial {
+			lpkg.Imports = nil
+		}
+	}
+
+	// Load type data and syntax if needed, starting at
+	// the initial packages (roots of the import DAG).
+	if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
+
+		// We avoid using g.SetLimit to limit concurrency as
+		// it makes g.Go stop accepting work, which prevents
+		// workers from enqueuing, and thus finishing, and thus
+		// allowing the group to make progress: deadlock.
+		//
+		// Instead we use the ioLimit and cpuLimit semaphores.
+		g, _ := errgroup.WithContext(ld.Context)
+
+		// enqueue adds a package to the type-checking queue.
+		// It must have no unfinished successors.
+		var enqueue func(*loaderPackage)
+		enqueue = func(lpkg *loaderPackage) {
+			g.Go(func() error {
+				// Parse and type-check.
+				ld.loadPackage(lpkg)
+
+				// Notify each waiting predecessor,
+				// and enqueue it when it becomes a leaf.
+				for _, pred := range lpkg.preds {
+					if pred.unfinishedSuccs.Add(-1) == 0 { // decref
+						enqueue(pred)
+					}
+				}
+
+				return nil
+			})
+		}
+
+		// Load leaves first, adding new packages
+		// to the queue as they become leaves.
+		for _, leaf := range leaves {
+			enqueue(leaf)
+		}
+
+		if err := g.Wait(); err != nil {
+			return nil, err // cancelled
+		}
+	}
+
+	// If the context is done, return its error and
+	// throw out [likely] incomplete packages.
+	if err := ld.Context.Err(); err != nil {
+		return nil, err
+	}
+
+	result := make([]*Package, len(initial))
+	for i, lpkg := range initial {
+		result[i] = lpkg.Package
+	}
+	for i := range ld.pkgs {
+		// Clear all unrequested fields,
+		// to catch programs that use more than they request.
+		if ld.requestedMode&NeedName == 0 {
+			ld.pkgs[i].Name = ""
+			ld.pkgs[i].PkgPath = ""
+		}
+		if ld.requestedMode&NeedFiles == 0 {
+			ld.pkgs[i].GoFiles = nil
+			ld.pkgs[i].OtherFiles = nil
+			ld.pkgs[i].IgnoredFiles = nil
+		}
+		if ld.requestedMode&NeedEmbedFiles == 0 {
+			ld.pkgs[i].EmbedFiles = nil
+		}
+		if ld.requestedMode&NeedEmbedPatterns == 0 {
+			ld.pkgs[i].EmbedPatterns = nil
+		}
+		if ld.requestedMode&NeedCompiledGoFiles == 0 {
+			ld.pkgs[i].CompiledGoFiles = nil
+		}
+		if ld.requestedMode&NeedImports == 0 {
+			ld.pkgs[i].Imports = nil
+		}
+		if ld.requestedMode&NeedExportFile == 0 {
+			ld.pkgs[i].ExportFile = ""
+		}
+		if ld.requestedMode&NeedTypes == 0 {
+			ld.pkgs[i].Types = nil
+			ld.pkgs[i].IllTyped = false
+		}
+		if ld.requestedMode&NeedSyntax == 0 {
+			ld.pkgs[i].Syntax = nil
+		}
+		if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 {
+			ld.pkgs[i].Fset = nil
+		}
+		if ld.requestedMode&NeedTypesInfo == 0 {
+			ld.pkgs[i].TypesInfo = nil
+		}
+		if ld.requestedMode&NeedTypesSizes == 0 {
+			ld.pkgs[i].TypesSizes = nil
+		}
+		if ld.requestedMode&NeedModule == 0 {
+			ld.pkgs[i].Module = nil
+		}
+	}
+
+	return result, nil
+}
+
+// loadPackage loads/parses/typechecks the specified package.
+// It must be called only once per Package,
+// after immediate dependencies are loaded.
+// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0.
+func (ld *loader) loadPackage(lpkg *loaderPackage) {
+	if lpkg.PkgPath == "unsafe" {
+		// Fill in the blanks to avoid surprises.
+		lpkg.Types = types.Unsafe
+		lpkg.Fset = ld.Fset
+		lpkg.Syntax = []*ast.File{}
+		lpkg.TypesInfo = new(types.Info)
+		lpkg.TypesSizes = ld.sizes
+		return
+	}
+
+	// Call NewPackage directly with explicit name.
+	// This avoids skew between golist and go/types when the files'
+	// package declarations are inconsistent.
+	lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name)
+	lpkg.Fset = ld.Fset
+
+	// Start shutting down if the context is done and do not load
+	// source or export data files.
+	// Packages that import this one will have ld.Context.Err() != nil.
+	// ld.Context.Err() will be returned later by refine.
+	if ld.Context.Err() != nil {
+		return
+	}
+
+	// Subtle: we populate all Types fields with an empty Package
+	// before loading export data so that export data processing
+	// never has to create a types.Package for an indirect dependency,
+	// which would then require that such created packages be explicitly
+	// inserted back into the Import graph as a final step after export data loading.
+	// (Hence this return is after the Types assignment.)
+	// The Diamond test exercises this case.
+	if !lpkg.needtypes && !lpkg.needsrc {
+		return
+	}
+
+	// TODO(adonovan): this condition looks wrong:
+	// I think it should be lpkg.needtypes && !lpkg.needsrc,
+	// so that NeedSyntax without NeedTypes can be satisfied by export data.
+	if !lpkg.needsrc {
+		if err := ld.loadFromExportData(lpkg); err != nil {
+			lpkg.Errors = append(lpkg.Errors, Error{
+				Pos:  "-",
+				Msg:  err.Error(),
+				Kind: UnknownError, // e.g. can't find/open/parse export data
+			})
+		}
+		return // not a source package, don't get syntax trees
+	}
+
+	appendError := func(err error) {
+		// Convert various error types into the one true Error.
+		var errs []Error
+		switch err := err.(type) {
+		case Error:
+			// from driver
+			errs = append(errs, err)
+
+		case *os.PathError:
+			// from parser
+			errs = append(errs, Error{
+				Pos:  err.Path + ":1",
+				Msg:  err.Err.Error(),
+				Kind: ParseError,
+			})
+
+		case scanner.ErrorList:
+			// from parser
+			for _, err := range err {
+				errs = append(errs, Error{
+					Pos:  err.Pos.String(),
+					Msg:  err.Msg,
+					Kind: ParseError,
+				})
+			}
+
+		case types.Error:
+			// from type checker
+			lpkg.TypeErrors = append(lpkg.TypeErrors, err)
+			errs = append(errs, Error{
+				Pos:  err.Fset.Position(err.Pos).String(),
+				Msg:  err.Msg,
+				Kind: TypeError,
+			})
+
+		default:
+			// unexpected impoverished error from parser?
+			errs = append(errs, Error{
+				Pos:  "-",
+				Msg:  err.Error(),
+				Kind: UnknownError,
+			})
+
+			// If you see this error message, please file a bug.
+			log.Printf("internal error: error %q (%T) without position", err, err)
+		}
+
+		lpkg.Errors = append(lpkg.Errors, errs...)
+	}
+
+	// If the go command on the PATH is newer than the runtime,
+	// then the go/{scanner,ast,parser,types} packages from the
+	// standard library may be unable to process the files
+	// selected by go list.
+	//
+	// There is currently no way to downgrade the effective
+	// version of the go command (see issue 52078), so we proceed
+	// with the newer go command but, in case of parse or type
+	// errors, we emit an additional diagnostic.
+	//
+	// See:
+	// - golang.org/issue/52078 (flag to set release tags)
+	// - golang.org/issue/50825 (gopls legacy version support)
+	// - golang.org/issue/55883 (go/packages confusing error)
+	//
+	// Should we assert a hard minimum of (currently) go1.16 here?
+	var runtimeVersion int
+	if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion {
+		defer func() {
+			if len(lpkg.Errors) > 0 {
+				appendError(Error{
+					Pos:  "-",
+					Msg:  fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion),
+					Kind: UnknownError,
+				})
+			}
+		}()
+	}
+
+	if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" {
+		// The config requested loading sources and types, but sources are missing.
+		// Add an error to the package and fall back to loading from export data.
+		appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError})
+		_ = ld.loadFromExportData(lpkg) // ignore any secondary errors
+
+		return // can't get syntax trees for this package
+	}
+
+	files, errs := ld.parseFiles(lpkg.CompiledGoFiles)
+	for _, err := range errs {
+		appendError(err)
+	}
+
+	lpkg.Syntax = files
+	if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 {
+		return
+	}
+
+	// Start shutting down if the context is done and do not type check.
+	// Packages that import this one will have ld.Context.Err() != nil.
+	// ld.Context.Err() will be returned later by refine.
+	if ld.Context.Err() != nil {
+		return
+	}
+
+	// Populate TypesInfo only if needed, as it
+	// causes the type checker to work much harder.
+	if ld.Config.Mode&NeedTypesInfo != 0 {
+		lpkg.TypesInfo = &types.Info{
+			Types:        make(map[ast.Expr]types.TypeAndValue),
+			Defs:         make(map[*ast.Ident]types.Object),
+			Uses:         make(map[*ast.Ident]types.Object),
+			Implicits:    make(map[ast.Node]types.Object),
+			Instances:    make(map[*ast.Ident]types.Instance),
+			Scopes:       make(map[ast.Node]*types.Scope),
+			Selections:   make(map[*ast.SelectorExpr]*types.Selection),
+			FileVersions: make(map[*ast.File]string),
+		}
+	}
+	lpkg.TypesSizes = ld.sizes
+
+	importer := importerFunc(func(path string) (*types.Package, error) {
+		if path == "unsafe" {
+			return types.Unsafe, nil
+		}
+
+		// The imports map is keyed by import path.
+		ipkg := lpkg.Imports[path]
+		if ipkg == nil {
+			if err := lpkg.importErrors[path]; err != nil {
+				return nil, err
+			}
+			// There was skew between the metadata and the
+			// import declarations, likely due to an edit
+			// race, or because the ParseFile feature was
+			// used to supply alternative file contents.
+			return nil, fmt.Errorf("no metadata for %s", path)
+		}
+
+		if ipkg.Types != nil && ipkg.Types.Complete() {
+			return ipkg.Types, nil
+		}
+		log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg)
+		panic("unreachable")
+	})
+
+	// type-check
+	tc := &types.Config{
+		Importer: importer,
+
+		// Type-check bodies of functions only in initial packages.
+		// Example: for import graph A->B->C and initial packages {A,C},
+		// we can ignore function bodies in B.
+		IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial,
+
+		Error: appendError,
+		Sizes: ld.sizes, // may be nil
+	}
+	if lpkg.Module != nil && lpkg.Module.GoVersion != "" {
+		tc.GoVersion = "go" + lpkg.Module.GoVersion
+	}
+	if (ld.Mode & typecheckCgo) != 0 {
+		if !typesinternal.SetUsesCgo(tc) {
+			appendError(Error{
+				Msg:  "typecheckCgo requires Go 1.15+",
+				Kind: ListError,
+			})
+			return
+		}
+	}
+
+	// Type-checking is CPU intensive.
+	cpuLimit <- unit{}            // acquire a token
+	defer func() { <-cpuLimit }() // release a token
+
+	typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
+	lpkg.importErrors = nil // no longer needed
+
+	// In go/types go1.21 and go1.22, Checker.Files failed fast with
+	// a "too new" error, without calling tc.Error and without
+	// proceeding to type-check the package (#66525).
+	// We rely on the runtimeVersion error to give the suggested remedy.
+	if typErr != nil && len(lpkg.Errors) == 0 && len(lpkg.Syntax) > 0 {
+		if msg := typErr.Error(); strings.HasPrefix(msg, "package requires newer Go version") {
+			appendError(types.Error{
+				Fset: ld.Fset,
+				Pos:  lpkg.Syntax[0].Package,
+				Msg:  msg,
+			})
+		}
+	}
+
+	// If !Cgo, the type-checker uses FakeImportC mode, so
+	// it doesn't invoke the importer for import "C",
+	// nor report an error for the import,
+	// or for any undefined C.f reference.
+	// We must detect this explicitly and correctly
+	// mark the package as IllTyped (by reporting an error).
+	// TODO(adonovan): if these errors are annoying,
+	// we could just set IllTyped quietly.
+	if tc.FakeImportC {
+	outer:
+		for _, f := range lpkg.Syntax {
+			for _, imp := range f.Imports {
+				if imp.Path.Value == `"C"` {
+					err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`}
+					appendError(err)
+					break outer
+				}
+			}
+		}
+	}
+
+	// If types.Checker.Files had an error that was unreported,
+	// make sure to report the unknown error so the package is illTyped.
+	if typErr != nil && len(lpkg.Errors) == 0 {
+		appendError(typErr)
+	}
+
+	// Record accumulated errors.
+	illTyped := len(lpkg.Errors) > 0
+	if !illTyped {
+		for _, imp := range lpkg.Imports {
+			if imp.IllTyped {
+				illTyped = true
+				break
+			}
+		}
+	}
+	lpkg.IllTyped = illTyped
+}
+
+// An importerFunc is an implementation of the single-method
+// types.Importer interface based on a function value.
+type importerFunc func(path string) (*types.Package, error)
+
+// Import implements types.Importer by delegating to the function value.
+func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
+
+// We use a counting semaphore to limit
+// the number of parallel I/O calls or CPU threads per process.
+var (
+	ioLimit  = make(chan unit, 20)                    // bounds concurrent file reads
+	cpuLimit = make(chan unit, runtime.GOMAXPROCS(0)) // bounds concurrent parsing/type-checking
+)
+
+// parseFile reads and parses a single Go file, consulting (and
+// populating) the loader's parse cache so each file is processed at most
+// once even under concurrent requests. File contents supplied via
+// Config.Overlay take precedence over the file on disk.
+func (ld *loader) parseFile(filename string) (*ast.File, error) {
+	ld.parseCacheMu.Lock()
+	v, ok := ld.parseCache[filename]
+	if ok {
+		// cache hit: wait for the first parse of this file to finish
+		ld.parseCacheMu.Unlock()
+		<-v.ready
+	} else {
+		// cache miss
+		v = &parseValue{ready: make(chan struct{})}
+		ld.parseCache[filename] = v
+		ld.parseCacheMu.Unlock()
+
+		var src []byte
+		for f, contents := range ld.Config.Overlay {
+			// TODO(adonovan): Inefficient for large overlays.
+			// Do an exact name-based map lookup
+			// (for nonexistent files) followed by a
+			// FileID-based map lookup (for existing ones).
+			if sameFile(f, filename) {
+				src = contents
+				break
+			}
+		}
+		var err error
+		if src == nil {
+			ioLimit <- unit{} // acquire a token
+			src, err = os.ReadFile(filename)
+			<-ioLimit // release a token
+		}
+		if err != nil {
+			v.err = err
+		} else {
+			// Parsing is CPU intensive.
+			cpuLimit <- unit{} // acquire a token
+			v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
+			<-cpuLimit // release a token
+		}
+
+		close(v.ready)
+	}
+	return v.f, v.err
+}
+
+// parseFiles reads and parses the Go source files and returns the ASTs
+// of the ones that could be at least partially parsed, along with a
+// list of I/O and parse errors encountered.
+//
+// Because files are scanned in parallel, the token.Pos
+// positions of the resulting ast.Files are not ordered.
+func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
+	var (
+		n      = len(filenames)
+		parsed = make([]*ast.File, n)
+		errors = make([]error, n)
+	)
+	var g errgroup.Group
+	for i, filename := range filenames {
+		// This creates goroutines unnecessarily in the
+		// cache-hit case, but that case is uncommon.
+		g.Go(func() error {
+			parsed[i], errors[i] = ld.parseFile(filename)
+			return nil
+		})
+	}
+	g.Wait() // cannot fail: the workers always return nil
+
+	// Eliminate nils, preserving order.
+	var o int
+	for _, f := range parsed {
+		if f != nil {
+			parsed[o] = f
+			o++
+		}
+	}
+	parsed = parsed[:o]
+
+	o = 0
+	for _, err := range errors {
+		if err != nil {
+			errors[o] = err
+			o++
+		}
+	}
+	errors = errors[:o]
+
+	return parsed, errors
+}
+
+// sameFile returns true if x and y have the same basename and denote
+// the same file.
+func sameFile(x, y string) bool {
+	if x == y {
+		// It could be the case that y doesn't exist.
+		// For instance, it may be an overlay file that
+		// hasn't been written to disk. To handle that case
+		// let x == y through. (We added the exact absolute path
+		// string to the CompiledGoFiles list, so the unwritten
+		// overlay case implies x==y.)
+		return true
+	}
+	// Cheap basename comparison first, to avoid two Stat calls
+	// when the files obviously differ.
+	if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation)
+		if xi, err := os.Stat(x); err == nil {
+			if yi, err := os.Stat(y); err == nil {
+				return os.SameFile(xi, yi)
+			}
+		}
+	}
+	return false
+}
+
+// loadFromExportData ensures that type information is present for the specified
+// package, loading it from an export data file on the first request.
+// On success it sets lpkg.Types to a new Package.
+// It returns an error if the export data file is absent or unreadable.
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) error {
+	if lpkg.PkgPath == "" {
+		log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
+	}
+
+	// Because gcexportdata.Read has the potential to create or
+	// modify the types.Package for each node in the transitive
+	// closure of dependencies of lpkg, all exportdata operations
+	// must be sequential. (Finer-grained locking would require
+	// changes to the gcexportdata API.)
+	//
+	// The exportMu lock guards the lpkg.Types field and the
+	// types.Package it points to, for each loaderPackage in the graph.
+	//
+	// Not all accesses to Package.Pkg need to be protected by exportMu:
+	// graph ordering ensures that direct dependencies of source
+	// packages are fully loaded before the importer reads their Pkg field.
+	ld.exportMu.Lock()
+	defer ld.exportMu.Unlock()
+
+	if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
+		return nil // cache hit
+	}
+
+	lpkg.IllTyped = true // fail safe
+
+	if lpkg.ExportFile == "" {
+		// Errors while building export data will have been printed to stderr.
+		return fmt.Errorf("no export data file")
+	}
+	f, err := os.Open(lpkg.ExportFile)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// Read gc export data.
+	//
+	// We don't currently support gccgo export data because all
+	// underlying workspaces use the gc toolchain. (Even build
+	// systems that support gccgo don't use it for workspace
+	// queries.)
+	r, err := gcexportdata.NewReader(f)
+	if err != nil {
+		return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+	}
+
+	// Build the view.
+	//
+	// The gcexportdata machinery has no concept of package ID.
+	// It identifies packages by their PkgPath, which although not
+	// globally unique is unique within the scope of one invocation
+	// of the linker, type-checker, or gcexportdata.
+	//
+	// So, we must build a PkgPath-keyed view of the global
+	// (conceptually ID-keyed) cache of packages and pass it to
+	// gcexportdata. The view must contain every existing
+	// package that might possibly be mentioned by the
+	// current package---its transitive closure.
+	//
+	// In loadPackage, we unconditionally create a types.Package for
+	// each dependency so that export data loading does not
+	// create new ones.
+	//
+	// TODO(adonovan): it would be simpler and more efficient
+	// if the export data machinery invoked a callback to
+	// get-or-create a package instead of a map.
+	//
+	view := make(map[string]*types.Package) // view seen by gcexportdata
+	seen := make(map[*loaderPackage]bool)   // all visited packages
+	var visit func(pkgs map[string]*Package)
+	visit = func(pkgs map[string]*Package) {
+		for _, p := range pkgs {
+			lpkg := ld.pkgs[p.ID]
+			if !seen[lpkg] {
+				seen[lpkg] = true
+				view[lpkg.PkgPath] = lpkg.Types
+				visit(lpkg.Imports)
+			}
+		}
+	}
+	visit(lpkg.Imports)
+
+	viewLen := len(view) + 1 // adding the self package
+	// Parse the export data.
+	// (May modify incomplete packages in view but not create new ones.)
+	tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
+	if err != nil {
+		return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+	}
+	if _, ok := view["go.shape"]; ok {
+		// Account for the pseudopackage "go.shape" that gets
+		// created by generic code.
+		viewLen++
+	}
+	// Sanity check: reading export data must not have created packages.
+	if viewLen != len(view) {
+		log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath)
+	}
+
+	lpkg.Types = tpkg
+	lpkg.IllTyped = false
+	return nil
+}
+
+// impliedLoadMode returns loadMode with its dependencies: additional
+// mode bits that must be enabled to compute the requested ones.
+func impliedLoadMode(loadMode LoadMode) LoadMode {
+	if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 {
+		// All these things require knowing the import graph.
+		loadMode |= NeedImports
+	}
+	if loadMode&NeedTypes != 0 {
+		// Types require the GoVersion from Module.
+		loadMode |= NeedModule
+	}
+
+	return loadMode
+}
+
+// usesExportData reports whether the configuration may rely on export
+// data files: either export file paths were requested explicitly, or
+// types are needed without the dependencies' sources.
+func usesExportData(cfg *Config) bool {
+	return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
+}
+
+// unit is the zero-size token type exchanged on the semaphore channels.
+type unit struct{}
diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go
new file mode 100644
index 0000000000..af6a60d75f
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/visit.go
@@ -0,0 +1,133 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "cmp"
+ "fmt"
+ "iter"
+ "os"
+ "slices"
+)
+
+// Visit visits all the packages in the import graph whose roots are
+// pkgs, calling the optional pre function the first time each package
+// is encountered (preorder), and the optional post function after a
+// package's dependencies have been visited (postorder).
+// The boolean result of pre(pkg) determines whether
+// the imports of package pkg are visited.
+//
+// Example:
+//
+//	pkgs, err := Load(...)
+//	if err != nil { ... }
+//	Visit(pkgs, nil, func(pkg *Package) {
+//		log.Println(pkg)
+//	})
+//
+// In most cases, it is more convenient to use [Postorder]:
+//
+//	for pkg := range Postorder(pkgs) {
+//		log.Println(pkg)
+//	}
+func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
+	seen := make(map[*Package]bool) // each package is visited at most once
+	var visit func(*Package)
+	visit = func(pkg *Package) {
+		if !seen[pkg] {
+			seen[pkg] = true
+
+			if pre == nil || pre(pkg) {
+				for _, imp := range sorted(pkg.Imports) { // for determinism
+					visit(imp)
+				}
+			}
+
+			if post != nil {
+				post(pkg)
+			}
+		}
+	}
+	for _, pkg := range pkgs {
+		visit(pkg)
+	}
+}
+
+// PrintErrors prints to os.Stderr the accumulated errors of all
+// packages in the import graph rooted at pkgs, dependencies first.
+// PrintErrors returns the number of errors printed.
+func PrintErrors(pkgs []*Package) int {
+	var n int
+	errModules := make(map[*Module]bool) // modules whose error has already been printed
+	for pkg := range Postorder(pkgs) {
+		for _, err := range pkg.Errors {
+			fmt.Fprintln(os.Stderr, err)
+			n++
+		}
+
+		// Print pkg.Module.Error once if present.
+		mod := pkg.Module
+		if mod != nil && mod.Error != nil && !errModules[mod] {
+			errModules[mod] = true
+			fmt.Fprintln(os.Stderr, mod.Error.Err)
+			n++
+		}
+	}
+	return n
+}
+
+// Postorder returns an iterator over the packages in
+// the import graph whose roots are pkgs.
+// Packages are enumerated in dependencies-first order.
+func Postorder(pkgs []*Package) iter.Seq[*Package] {
+	return func(yield func(*Package) bool) {
+		seen := make(map[*Package]bool)
+		var visit func(*Package) bool
+		visit = func(pkg *Package) bool {
+			if !seen[pkg] {
+				seen[pkg] = true
+				for _, imp := range sorted(pkg.Imports) { // for determinism
+					if !visit(imp) {
+						return false
+					}
+				}
+				if !yield(pkg) {
+					return false
+				}
+			}
+			return true
+		}
+		for _, pkg := range pkgs {
+			if !visit(pkg) {
+				break
+			}
+		}
+	}
+}
+
+// -- copied from golang.org.x/tools/gopls/internal/util/moremaps --
+
+// sorted returns an iterator over the entries of m in ascending key order.
+func sorted[M ~map[K]V, K cmp.Ordered, V any](m M) iter.Seq2[K, V] {
+	// TODO(adonovan): use maps.Sorted if proposal #68598 is accepted.
+	return func(yield func(K, V) bool) {
+		keys := keySlice(m)
+		slices.Sort(keys)
+		for _, k := range keys {
+			if !yield(k, m[k]) {
+				break
+			}
+		}
+	}
+}
+
+// keySlice returns the keys of the map m, like slices.Collect(maps.Keys(m)).
+func keySlice[M ~map[K]V, K comparable, V any](m M) []K {
+	r := make([]K, 0, len(m))
+	for k := range m {
+		r = append(r, k)
+	}
+	return r
+}
diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
new file mode 100644
index 0000000000..6c0c74968f
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
@@ -0,0 +1,820 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package objectpath defines a naming scheme for types.Objects
+// (that is, named entities in Go programs) relative to their enclosing
+// package.
+//
+// Type-checker objects are canonical, so they are usually identified by
+// their address in memory (a pointer), but a pointer has meaning only
+// within one address space. By contrast, objectpath names allow the
+// identity of an object to be sent from one program to another,
+// establishing a correspondence between types.Object variables that are
+// distinct but logically equivalent.
+//
+// A single object may have multiple paths. In this example,
+//
+// type A struct{ X int }
+// type B A
+//
+// the field X has two paths due to its membership of both A and B.
+// The For(obj) function always returns one of these paths, arbitrarily
+// but consistently.
+package objectpath
+
+import (
+ "fmt"
+ "go/types"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/internal/aliases"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+// TODO(adonovan): think about generic aliases.
+
+// A Path is an opaque name that identifies a types.Object
+// relative to its package. Conceptually, the name consists of a
+// sequence of destructuring operations applied to the package scope
+// to obtain the original object.
+// The name does not include the package itself.
+type Path string
+
+// Encoding
+//
+// An object path is a textual and (with training) human-readable encoding
+// of a sequence of destructuring operators, starting from a types.Package.
+// The sequences represent a path through the package/object/type graph.
+// We classify these operators by their type:
+//
+// PO package->object Package.Scope.Lookup
+// OT object->type Object.Type
+// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa]
+// TO type->object Type.{At,Field,Method,Obj} [AFMO]
+//
+// All valid paths start with a package and end at an object
+// and thus may be defined by the regular language:
+//
+// objectpath = PO (OT TT* TO)*
+//
+// The concrete encoding follows directly:
+// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
+// - The only OT operator is Object.Type,
+// which we encode as '.' because dot cannot appear in an identifier.
+// - The TT operators are encoded as [EKPRUTrCa];
+// two of these ({,Recv}TypeParams) require an integer operand,
+// which is encoded as a string of decimal digits.
+// - The TO operators are encoded as [AFMO];
+// three of these (At,Field,Method) require an integer operand,
+// which is encoded as a string of decimal digits.
+// These indices are stable across different representations
+// of the same package, even source and export data.
+// The indices used are implementation specific and may not correspond to
+// the argument to the go/types function.
+//
+// In the example below,
+//
+// package p
+//
+// type T interface {
+// f() (a string, b struct{ X int })
+// }
+//
+// field X has the path "T.UM0.RA1.F0",
+// representing the following sequence of operations:
+//
+// p.Lookup("T") T
+// .Type().Underlying().Method(0). f
+// .Type().Results().At(1) b
+// .Type().Field(0) X
+//
+// The encoding is not maximally compact---every R or P is
+// followed by an A, for example---but this simplifies the
+// encoder and decoder.
+const (
+ // object->type operators
+ opType = '.' // .Type() (Object)
+
+ // type->type operators
+ opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
+ opKey = 'K' // .Key() (Map)
+ opParams = 'P' // .Params() (Signature)
+ opResults = 'R' // .Results() (Signature)
+ opUnderlying = 'U' // .Underlying() (Named)
+ opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
+ opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature)
+ opConstraint = 'C' // .Constraint() (TypeParam)
+ opRhs = 'a' // .Rhs() (Alias)
+
+ // type->object operators
+ opAt = 'A' // .At(i) (Tuple)
+ opField = 'F' // .Field(i) (Struct)
+ opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
+ opObj = 'O' // .Obj() (Named, TypeParam)
+)
+
+// For is equivalent to new(Encoder).For(obj).
+//
+// It may be more efficient to reuse a single Encoder across several calls.
+func For(obj types.Object) (Path, error) {
+ return new(Encoder).For(obj)
+}
+
+// An Encoder amortizes the cost of encoding the paths of multiple objects.
+// The zero value of an Encoder is ready to use.
+type Encoder struct {
+ scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects
+}
+
+// For returns the path to an object relative to its package,
+// or an error if the object is not accessible from the package's Scope.
+//
+// The For function guarantees to return a path only for the following objects:
+// - package-level types
+// - exported package-level non-types
+// - methods
+// - parameter and result variables
+// - struct fields
+// These objects are sufficient to define the API of their package.
+// The objects described by a package's export data are drawn from this set.
+//
+// The set of objects accessible from a package's Scope depends on
+// whether the package was produced by type-checking syntax, or
+// reading export data; the latter may have a smaller Scope since
+// export data trims objects that are not reachable from an exported
+// declaration. For example, the For function will return a path for
+// an exported method of an unexported type that is not reachable
+// from any public declaration; this path will cause the Object
+// function to fail if called on a package loaded from export data.
+// TODO(adonovan): is this a bug or feature? Should this package
+// compute accessibility in the same way?
+//
+// For does not return a path for predeclared names, imported package
+// names, local names, and unexported package-level names (except
+// types).
+//
+// Example: given this definition,
+//
+// package p
+//
+// type T interface {
+// f() (a string, b struct{ X int })
+// }
+//
+// For(X) would return a path that denotes the following sequence of operations:
+//
+// p.Scope().Lookup("T") (TypeName T)
+// .Type().Underlying().Method(0). (method Func f)
+// .Type().Results().At(1) (field Var b)
+// .Type().Field(0) (field Var X)
+//
+// where p is the package (*types.Package) to which X belongs.
+func (enc *Encoder) For(obj types.Object) (Path, error) {
+ pkg := obj.Pkg()
+
+ // This table lists the cases of interest.
+ //
+ // Object Action
+ // ------ ------
+ // nil reject
+ // builtin reject
+ // pkgname reject
+ // label reject
+ // var
+ // package-level accept
+ // func param/result accept
+ // local reject
+ // struct field accept
+ // const
+ // package-level accept
+ // local reject
+ // func
+ // package-level accept
+ // init functions reject
+ // concrete method accept
+ // interface method accept
+ // type
+ // package-level accept
+ // local reject
+ //
+ // The only accessible package-level objects are members of pkg itself.
+ //
+ // The cases are handled in four steps:
+ //
+ // 1. reject nil and builtin
+ // 2. accept package-level objects
+ // 3. reject obviously invalid objects
+ // 4. search the API for the path to the param/result/field/method.
+
+ // 1. reference to nil or builtin?
+ if pkg == nil {
+ return "", fmt.Errorf("predeclared %s has no path", obj)
+ }
+ scope := pkg.Scope()
+
+ // 2. package-level object?
+ if scope.Lookup(obj.Name()) == obj {
+ // Only exported objects (and non-exported types) have a path.
+ // Non-exported types may be referenced by other objects.
+ if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
+ return "", fmt.Errorf("no path for non-exported %v", obj)
+ }
+ return Path(obj.Name()), nil
+ }
+
+ // 3. Not a package-level object.
+ // Reject obviously non-viable cases.
+ switch obj := obj.(type) {
+ case *types.TypeName:
+ if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok {
+ // With the exception of type parameters, only package-level type names
+ // have a path.
+ return "", fmt.Errorf("no path for %v", obj)
+ }
+ case *types.Const, // Only package-level constants have a path.
+ *types.Label, // Labels are function-local.
+ *types.PkgName: // PkgNames are file-local.
+ return "", fmt.Errorf("no path for %v", obj)
+
+ case *types.Var:
+ // Could be:
+ // - a field (obj.IsField())
+ // - a func parameter or result
+ // - a local var.
+ // Sadly there is no way to distinguish
+ // a param/result from a local
+ // so we must proceed to the find.
+
+ case *types.Func:
+ // A func, if not package-level, must be a method.
+ if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
+ return "", fmt.Errorf("func is not a method: %v", obj)
+ }
+
+ if path, ok := enc.concreteMethod(obj); ok {
+ // Fast path for concrete methods that avoids looping over scope.
+ return path, nil
+ }
+
+ default:
+ panic(obj)
+ }
+
+ // 4. Search the API for the path to the var (field/param/result) or method.
+
+ // First inspect package-level named types.
+ // In the presence of path aliases, these give
+ // the best paths because non-types may
+ // refer to types, but not the reverse.
+ empty := make([]byte, 0, 48) // initial space
+ objs := enc.scopeObjects(scope)
+ for _, o := range objs {
+ tname, ok := o.(*types.TypeName)
+ if !ok {
+ continue // handle non-types in second pass
+ }
+
+ path := append(empty, o.Name()...)
+ path = append(path, opType)
+
+ T := o.Type()
+ if alias, ok := T.(*types.Alias); ok {
+ if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil {
+ return Path(r), nil
+ }
+ if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil {
+ return Path(r), nil
+ }
+
+ } else if tname.IsAlias() {
+ // legacy alias
+ if r := find(obj, T, path); r != nil {
+ return Path(r), nil
+ }
+
+ } else if named, ok := T.(*types.Named); ok {
+ // defined (named) type
+ if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil {
+ return Path(r), nil
+ }
+ if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil {
+ return Path(r), nil
+ }
+ }
+ }
+
+ // Then inspect everything else:
+ // non-types, and declared methods of defined types.
+ for _, o := range objs {
+ path := append(empty, o.Name()...)
+ if _, ok := o.(*types.TypeName); !ok {
+ if o.Exported() {
+ // exported non-type (const, var, func)
+ if r := find(obj, o.Type(), append(path, opType)); r != nil {
+ return Path(r), nil
+ }
+ }
+ continue
+ }
+
+ // Inspect declared methods of defined types.
+ if T, ok := types.Unalias(o.Type()).(*types.Named); ok {
+ path = append(path, opType)
+ // The method index here is always with respect
+ // to the underlying go/types data structures,
+ // which ultimately derives from source order
+ // and must be preserved by export data.
+ for i := 0; i < T.NumMethods(); i++ {
+ m := T.Method(i)
+ path2 := appendOpArg(path, opMethod, i)
+ if m == obj {
+ return Path(path2), nil // found declared method
+ }
+ if r := find(obj, m.Type(), append(path2, opType)); r != nil {
+ return Path(r), nil
+ }
+ }
+ }
+ }
+
+ return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
+}
+
+func appendOpArg(path []byte, op byte, arg int) []byte {
+ path = append(path, op)
+ path = strconv.AppendInt(path, int64(arg), 10)
+ return path
+}
+
+// concreteMethod returns the path for meth, which must have a non-nil receiver.
+// The second return value indicates success and may be false if the method is
+// an interface method or if it is an instantiated method.
+//
+// This function is just an optimization that avoids the general scope walking
+// approach. You are expected to fall back to the general approach if this
+// function fails.
+func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
+ // Concrete methods can only be declared on package-scoped named types. For
+ // that reason we can skip the expensive walk over the package scope: the
+ // path will always be package -> named type -> method. We can trivially get
+ // the type name from the receiver, and only have to look over the type's
+ // methods to find the method index.
+ //
+ // Methods on generic types require special consideration, however. Consider
+ // the following package:
+ //
+ // L1: type S[T any] struct{}
+ // L2: func (recv S[A]) Foo() { recv.Bar() }
+ // L3: func (recv S[B]) Bar() { }
+ // L4: type Alias = S[int]
+ // L5: func _[T any]() { var s S[int]; s.Foo() }
+ //
+ // The receivers of methods on generic types are instantiations. L2 and L3
+ // instantiate S with the type-parameters A and B, which are scoped to the
+ // respective methods. L4 and L5 each instantiate S with int. Each of these
+ // instantiations has its own method set, full of methods (and thus objects)
+ // with receivers whose types are the respective instantiations. In other
+ // words, we have
+ //
+ // S[A].Foo, S[A].Bar
+ // S[B].Foo, S[B].Bar
+ // S[int].Foo, S[int].Bar
+ //
+ // We may thus be trying to produce object paths for any of these objects.
+ //
+ // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo
+ // and S.Bar, which are the paths that this function naturally produces.
+ //
+ // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that
+ // don't correspond to the origin methods. For S[int], this is significant.
+ // The most precise object path for S[int].Foo, for example, is Alias.Foo,
+ // not S.Foo. Our function, however, would produce S.Foo, which would
+ // resolve to a different object.
+ //
+ // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are
+ // still the correct paths, since only the origin methods have meaningful
+ // paths. But this is likely only true for trivial cases and has edge cases.
+ // Since this function is only an optimization, we err on the side of giving
+ // up, deferring to the slower but definitely correct algorithm. Most users
+ // of objectpath will only be giving us origin methods, anyway, as referring
+ // to instantiated methods is usually not useful.
+
+ if meth.Origin() != meth {
+ return "", false
+ }
+
+ _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv())
+ if named == nil {
+ return "", false
+ }
+
+ if types.IsInterface(named) {
+ // Named interfaces don't have to be package-scoped
+ //
+ // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface
+ // methods, too, I think.
+ return "", false
+ }
+
+ // Preallocate space for the name, opType, opMethod, and some digits.
+ name := named.Obj().Name()
+ path := make([]byte, 0, len(name)+8)
+ path = append(path, name...)
+ path = append(path, opType)
+
+ // Method indices are w.r.t. the go/types data structures,
+ // ultimately deriving from source order,
+ // which is preserved by export data.
+ for i := 0; i < named.NumMethods(); i++ {
+ if named.Method(i) == meth {
+ path = appendOpArg(path, opMethod, i)
+ return Path(path), true
+ }
+ }
+
+ // Due to golang/go#59944, go/types fails to associate the receiver with
+ // certain methods on cgo types.
+ //
+ // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go
+ // versions gopls supports.
+ return "", false
+ // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named)))
+}
+
+// find finds obj within type T, returning the path to it, or nil if not found.
+//
+// The seen map is used to short circuit cycles through type parameters. If
+// nil, it will be allocated as necessary.
+//
+// The seenMethods map is used internally to short circuit cycles through
+// interface methods, such as occur in the following example:
+//
+// type I interface { f() interface{I} }
+//
+// See golang/go#68046 for details.
+func find(obj types.Object, T types.Type, path []byte) []byte {
+ return (&finder{obj: obj}).find(T, path)
+}
+
+// finder closes over search state for a call to find.
+type finder struct {
+ obj types.Object // the sought object
+ seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters
+ seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces
+}
+
+func (f *finder) find(T types.Type, path []byte) []byte {
+ switch T := T.(type) {
+ case *types.Alias:
+ return f.find(types.Unalias(T), path)
+ case *types.Basic, *types.Named:
+ // Named types belonging to pkg were handled already,
+ // so T must belong to another package. No path.
+ return nil
+ case *types.Pointer:
+ return f.find(T.Elem(), append(path, opElem))
+ case *types.Slice:
+ return f.find(T.Elem(), append(path, opElem))
+ case *types.Array:
+ return f.find(T.Elem(), append(path, opElem))
+ case *types.Chan:
+ return f.find(T.Elem(), append(path, opElem))
+ case *types.Map:
+ if r := f.find(T.Key(), append(path, opKey)); r != nil {
+ return r
+ }
+ return f.find(T.Elem(), append(path, opElem))
+ case *types.Signature:
+ if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil {
+ return r
+ }
+ if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil {
+ return r
+ }
+ if r := f.find(T.Params(), append(path, opParams)); r != nil {
+ return r
+ }
+ return f.find(T.Results(), append(path, opResults))
+ case *types.Struct:
+ for i := 0; i < T.NumFields(); i++ {
+ fld := T.Field(i)
+ path2 := appendOpArg(path, opField, i)
+ if fld == f.obj {
+ return path2 // found field var
+ }
+ if r := f.find(fld.Type(), append(path2, opType)); r != nil {
+ return r
+ }
+ }
+ return nil
+ case *types.Tuple:
+ for i := 0; i < T.Len(); i++ {
+ v := T.At(i)
+ path2 := appendOpArg(path, opAt, i)
+ if v == f.obj {
+ return path2 // found param/result var
+ }
+ if r := f.find(v.Type(), append(path2, opType)); r != nil {
+ return r
+ }
+ }
+ return nil
+ case *types.Interface:
+ for i := 0; i < T.NumMethods(); i++ {
+ m := T.Method(i)
+ if f.seenMethods[m] {
+ return nil
+ }
+ path2 := appendOpArg(path, opMethod, i)
+ if m == f.obj {
+ return path2 // found interface method
+ }
+ if f.seenMethods == nil {
+ f.seenMethods = make(map[*types.Func]bool)
+ }
+ f.seenMethods[m] = true
+ if r := f.find(m.Type(), append(path2, opType)); r != nil {
+ return r
+ }
+ }
+ return nil
+ case *types.TypeParam:
+ name := T.Obj()
+ if f.seenTParamNames[name] {
+ return nil
+ }
+ if name == f.obj {
+ return append(path, opObj)
+ }
+ if f.seenTParamNames == nil {
+ f.seenTParamNames = make(map[*types.TypeName]bool)
+ }
+ f.seenTParamNames[name] = true
+ if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil {
+ return r
+ }
+ return nil
+ }
+ panic(T)
+}
+
+func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte {
+ return (&finder{obj: obj}).findTypeParam(list, path, op)
+}
+
+func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte {
+ for i := 0; i < list.Len(); i++ {
+ tparam := list.At(i)
+ path2 := appendOpArg(path, op, i)
+ if r := f.find(tparam, path2); r != nil {
+ return r
+ }
+ }
+ return nil
+}
+
+// Object returns the object denoted by path p within the package pkg.
+func Object(pkg *types.Package, p Path) (types.Object, error) {
+ pathstr := string(p)
+ if pathstr == "" {
+ return nil, fmt.Errorf("empty path")
+ }
+
+ var pkgobj, suffix string
+ if dot := strings.IndexByte(pathstr, opType); dot < 0 {
+ pkgobj = pathstr
+ } else {
+ pkgobj = pathstr[:dot]
+ suffix = pathstr[dot:] // suffix starts with "."
+ }
+
+ obj := pkg.Scope().Lookup(pkgobj)
+ if obj == nil {
+ return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
+ }
+
+ // abstraction of *types.{Pointer,Slice,Array,Chan,Map}
+ type hasElem interface {
+ Elem() types.Type
+ }
+ // abstraction of *types.{Named,Signature}
+ type hasTypeParams interface {
+ TypeParams() *types.TypeParamList
+ }
+ // abstraction of *types.{Alias,Named,TypeParam}
+ type hasObj interface {
+ Obj() *types.TypeName
+ }
+
+ // The loop state is the pair (t, obj),
+ // exactly one of which is non-nil, initially obj.
+ // All suffixes start with '.' (the only object->type operation),
+ // followed by optional type->type operations,
+ // then a type->object operation.
+ // The cycle then repeats.
+ var t types.Type
+ for suffix != "" {
+ code := suffix[0]
+ suffix = suffix[1:]
+
+ // Codes [AFMTr] have an integer operand.
+ var index int
+ switch code {
+ case opAt, opField, opMethod, opTypeParam, opRecvTypeParam:
+ rest := strings.TrimLeft(suffix, "0123456789")
+ numerals := suffix[:len(suffix)-len(rest)]
+ suffix = rest
+ i, err := strconv.Atoi(numerals)
+ if err != nil {
+ return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
+ }
+ index = int(i)
+ case opObj:
+ // no operand
+ default:
+ // The suffix must end with a type->object operation.
+ if suffix == "" {
+ return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
+ }
+ }
+
+ if code == opType {
+ if t != nil {
+ return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
+ }
+ t = obj.Type()
+ obj = nil
+ continue
+ }
+
+ if t == nil {
+ return nil, fmt.Errorf("invalid path: code %q in object context", code)
+ }
+
+ // Inv: t != nil, obj == nil
+
+ t = types.Unalias(t)
+ switch code {
+ case opElem:
+ hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
+ }
+ t = hasElem.Elem()
+
+ case opKey:
+ mapType, ok := t.(*types.Map)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
+ }
+ t = mapType.Key()
+
+ case opParams:
+ sig, ok := t.(*types.Signature)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+ }
+ t = sig.Params()
+
+ case opResults:
+ sig, ok := t.(*types.Signature)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+ }
+ t = sig.Results()
+
+ case opUnderlying:
+ named, ok := t.(*types.Named)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t)
+ }
+ t = named.Underlying()
+
+ case opRhs:
+ if alias, ok := t.(*types.Alias); ok {
+ t = aliases.Rhs(alias)
+ } else if false && aliases.Enabled() {
+ // The Enabled check is too expensive, so for now we
+ // simply assume that aliases are not enabled.
+ //
+ // Now that go1.24 is assured, we should be able to
+ // replace this with "if true {", but it causes tests
+ // to fail. TODO(adonovan): investigate.
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t)
+ }
+
+ case opTypeParam:
+ hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t)
+ }
+ tparams := hasTypeParams.TypeParams()
+ if n := tparams.Len(); index >= n {
+ return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+ }
+ t = tparams.At(index)
+
+ case opRecvTypeParam:
+ sig, ok := t.(*types.Signature) // Signature
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+ }
+ rtparams := sig.RecvTypeParams()
+ if n := rtparams.Len(); index >= n {
+ return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+ }
+ t = rtparams.At(index)
+
+ case opConstraint:
+ tparam, ok := t.(*types.TypeParam)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t)
+ }
+ t = tparam.Constraint()
+
+ case opAt:
+ tuple, ok := t.(*types.Tuple)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t)
+ }
+ if n := tuple.Len(); index >= n {
+ return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+ }
+ obj = tuple.At(index)
+ t = nil
+
+ case opField:
+ structType, ok := t.(*types.Struct)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
+ }
+ if n := structType.NumFields(); index >= n {
+ return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
+ }
+ obj = structType.Field(index)
+ t = nil
+
+ case opMethod:
+ switch t := t.(type) {
+ case *types.Interface:
+ if index >= t.NumMethods() {
+ return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
+ }
+ obj = t.Method(index) // Id-ordered
+
+ case *types.Named:
+ if index >= t.NumMethods() {
+ return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
+ }
+ obj = t.Method(index)
+
+ default:
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t)
+ }
+ t = nil
+
+ case opObj:
+ hasObj, ok := t.(hasObj)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t)
+ }
+ obj = hasObj.Obj()
+ t = nil
+
+ default:
+ return nil, fmt.Errorf("invalid path: unknown code %q", code)
+ }
+ }
+
+ if obj == nil {
+ panic(p) // path does not end in an object-valued operator
+ }
+
+ if obj.Pkg() != pkg {
+ return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
+ }
+
+ return obj, nil // success
+}
+
+// scopeObjects is a memoization of scope objects.
+// Callers must not modify the result.
+func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object {
+ m := enc.scopeMemo
+ if m == nil {
+ m = make(map[*types.Scope][]types.Object)
+ enc.scopeMemo = m
+ }
+ objs, ok := m[scope]
+ if !ok {
+ names := scope.Names() // allocates and sorts
+ objs = make([]types.Object, len(names))
+ for i, name := range names {
+ objs[i] = scope.Lookup(name)
+ }
+ m[scope] = objs
+ }
+ return objs
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
new file mode 100644
index 0000000000..5f10f56cba
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
@@ -0,0 +1,85 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import (
+ "go/ast"
+ "go/types"
+ _ "unsafe" // for linkname
+)
+
+// Callee returns the named target of a function call, if any:
+// a function, method, builtin, or variable.
+//
+// Functions and methods may potentially have type parameters.
+//
+// Note: for calls of instantiated functions and methods, Callee returns
+// the corresponding generic function or method on the generic type.
+func Callee(info *types.Info, call *ast.CallExpr) types.Object {
+ obj := info.Uses[usedIdent(info, call.Fun)]
+ if obj == nil {
+ return nil
+ }
+ if _, ok := obj.(*types.TypeName); ok {
+ return nil
+ }
+ return obj
+}
+
+// StaticCallee returns the target (function or method) of a static function
+// call, if any. It returns nil for calls to builtins.
+//
+// Note: for calls of instantiated functions and methods, StaticCallee returns
+// the corresponding generic function or method on the generic type.
+func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
+ obj := info.Uses[usedIdent(info, call.Fun)]
+ fn, _ := obj.(*types.Func)
+ if fn == nil || interfaceMethod(fn) {
+ return nil
+ }
+ return fn
+}
+
+// usedIdent is the implementation of [internal/typesinternal.UsedIdent].
+// It returns the identifier associated with e.
+// See typesinternal.UsedIdent for a fuller description.
+// This function should live in typesinternal, but cannot because it would
+// create an import cycle.
+//
+//go:linkname usedIdent golang.org/x/tools/go/types/typeutil.usedIdent
+func usedIdent(info *types.Info, e ast.Expr) *ast.Ident {
+ if info.Types == nil || info.Uses == nil {
+ panic("one of info.Types or info.Uses is nil; both must be populated")
+ }
+ // Look through type instantiation if necessary.
+ switch d := ast.Unparen(e).(type) {
+ case *ast.IndexExpr:
+ if info.Types[d.Index].IsType() {
+ e = d.X
+ }
+ case *ast.IndexListExpr:
+ e = d.X
+ }
+
+ switch e := ast.Unparen(e).(type) {
+ // info.Uses always has the object we want, even for selector expressions.
+ // We don't need info.Selections.
+ // See go/types/recording.go:recordSelection.
+ case *ast.Ident:
+ return e
+ case *ast.SelectorExpr:
+ return e.Sel
+ }
+ return nil
+}
+
+// interfaceMethod reports whether its argument is a method of an interface.
+// This function should live in typesinternal, but cannot because it would create an import cycle.
+//
+//go:linkname interfaceMethod golang.org/x/tools/go/types/typeutil.interfaceMethod
+func interfaceMethod(f *types.Func) bool {
+ recv := f.Signature().Recv()
+ return recv != nil && types.IsInterface(recv.Type())
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go
new file mode 100644
index 0000000000..b81ce0c330
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go
@@ -0,0 +1,30 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import "go/types"
+
+// Dependencies returns all dependencies of the specified packages.
+//
+// Dependent packages appear in topological order: if package P imports
+// package Q, Q appears earlier than P in the result.
+// The algorithm follows import statements in the order they
+// appear in the source code, so the result is a total order.
+func Dependencies(pkgs ...*types.Package) []*types.Package {
+ var result []*types.Package
+ seen := make(map[*types.Package]bool)
+ var visit func(pkgs []*types.Package)
+ visit = func(pkgs []*types.Package) {
+ for _, p := range pkgs {
+ if !seen[p] {
+ seen[p] = true
+ visit(p.Imports())
+ result = append(result, p)
+ }
+ }
+ }
+ visit(pkgs)
+ return result
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go
new file mode 100644
index 0000000000..f035a0b6be
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go
@@ -0,0 +1,460 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeutil defines various utilities for types, such as [Map],
+// a hash table that maps [types.Type] to any value.
+package typeutil
+
+import (
+ "bytes"
+ "fmt"
+ "go/types"
+ "hash/maphash"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// Map is a hash-table-based mapping from types (types.Type) to
+// arbitrary values. The concrete types that implement
+// the Type interface are pointers. Since they are not canonicalized,
+// == cannot be used to check for equivalence, and thus we cannot
+// simply use a Go map.
+//
+// Just as with map[K]V, a nil *Map is a valid empty map.
+//
+// Read-only map operations ([Map.At], [Map.Len], and so on) may
+// safely be called concurrently.
+//
+// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420
+// and 69559, if the latter proposals for a generic hash-map type and
+// a types.Hash function are accepted.
+type Map struct {
+ table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
+ length int // number of map entries
+}
+
+// entry is an entry (key/value association) in a hash bucket.
+type entry struct {
+ key types.Type
+ value any
+}
+
+// SetHasher has no effect.
+//
+// It is a relic of an optimization that is no longer profitable. Do
+// not use [Hasher], [MakeHasher], or [SetHasher] in new code.
+func (m *Map) SetHasher(Hasher) {}
+
+// Delete removes the entry with the given key, if any.
+// It returns true if the entry was found.
+func (m *Map) Delete(key types.Type) bool {
+ if m != nil && m.table != nil {
+ hash := hash(key)
+ bucket := m.table[hash]
+ for i, e := range bucket {
+ if e.key != nil && types.Identical(key, e.key) {
+ // We can't compact the bucket as it
+ // would disturb iterators.
+ bucket[i] = entry{}
+ m.length--
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// At returns the map entry for the given key.
+// The result is nil if the entry is not present.
+func (m *Map) At(key types.Type) any {
+ if m != nil && m.table != nil {
+ for _, e := range m.table[hash(key)] {
+ if e.key != nil && types.Identical(key, e.key) {
+ return e.value
+ }
+ }
+ }
+ return nil
+}
+
+// Set sets the map entry for key to val,
+// and returns the previous entry, if any.
+func (m *Map) Set(key types.Type, value any) (prev any) {
+ if m.table != nil {
+ hash := hash(key)
+ bucket := m.table[hash]
+ var hole *entry
+ for i, e := range bucket {
+ if e.key == nil {
+ hole = &bucket[i]
+ } else if types.Identical(key, e.key) {
+ prev = e.value
+ bucket[i].value = value
+ return
+ }
+ }
+
+ if hole != nil {
+ *hole = entry{key, value} // overwrite deleted entry
+ } else {
+ m.table[hash] = append(bucket, entry{key, value})
+ }
+ } else {
+ hash := hash(key)
+ m.table = map[uint32][]entry{hash: {entry{key, value}}}
+ }
+
+ m.length++
+ return
+}
+
+// Len returns the number of map entries.
+func (m *Map) Len() int {
+ if m != nil {
+ return m.length
+ }
+ return 0
+}
+
+// Iterate calls function f on each entry in the map in unspecified order.
+//
+// If f should mutate the map, Iterate provides the same guarantees as
+// Go maps: if f deletes a map entry that Iterate has not yet reached,
+// f will not be invoked for it, but if f inserts a map entry that
+// Iterate has not yet reached, whether or not f will be invoked for
+// it is unspecified.
+func (m *Map) Iterate(f func(key types.Type, value any)) {
+ if m != nil {
+ for _, bucket := range m.table {
+ for _, e := range bucket {
+ if e.key != nil {
+ f(e.key, e.value)
+ }
+ }
+ }
+ }
+}
+
+// Keys returns a new slice containing the set of map keys.
+// The order is unspecified.
+func (m *Map) Keys() []types.Type {
+ keys := make([]types.Type, 0, m.Len())
+ m.Iterate(func(key types.Type, _ any) {
+ keys = append(keys, key)
+ })
+ return keys
+}
+
+func (m *Map) toString(values bool) string {
+ if m == nil {
+ return "{}"
+ }
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, "{")
+ sep := ""
+ m.Iterate(func(key types.Type, value any) {
+ fmt.Fprint(&buf, sep)
+ sep = ", "
+ fmt.Fprint(&buf, key)
+ if values {
+ fmt.Fprintf(&buf, ": %q", value)
+ }
+ })
+ fmt.Fprint(&buf, "}")
+ return buf.String()
+}
+
+// String returns a string representation of the map's entries.
+// Values are printed using fmt.Sprintf("%v", v).
+// Order is unspecified.
+func (m *Map) String() string {
+ return m.toString(true)
+}
+
+// KeysString returns a string representation of the map's key set.
+// Order is unspecified.
+func (m *Map) KeysString() string {
+ return m.toString(false)
+}
+
+// -- Hasher --
+
+// hash returns the hash of type t.
+// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted.
+func hash(t types.Type) uint32 {
+ return theHasher.Hash(t)
+}
+
+// A Hasher provides a [Hasher.Hash] method to map a type to its hash value.
+// Hashers are stateless, and all are equivalent.
+type Hasher struct{}
+
+var theHasher Hasher
+
+// MakeHasher returns Hasher{}.
+// Hashers are stateless; all are equivalent.
+func MakeHasher() Hasher { return theHasher }
+
+// Hash computes a hash value for the given type t such that
+// Identical(t, t') => Hash(t) == Hash(t').
+func (h Hasher) Hash(t types.Type) uint32 {
+ return hasher{inGenericSig: false}.hash(t)
+}
+
+// hasher holds the state of a single Hash traversal: whether we are
+// inside the signature of a generic function; this is used to
+// optimize [hasher.hashTypeParam].
+type hasher struct{ inGenericSig bool }
+
+// hashString computes the Fowler–Noll–Vo hash of s.
+func hashString(s string) uint32 {
+ var h uint32
+ for i := 0; i < len(s); i++ {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+// hash computes the hash of t.
+func (h hasher) hash(t types.Type) uint32 {
+ // See Identical for rationale.
+ switch t := t.(type) {
+ case *types.Basic:
+ return uint32(t.Kind())
+
+ case *types.Alias:
+ return h.hash(types.Unalias(t))
+
+ case *types.Array:
+ return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem())
+
+ case *types.Slice:
+ return 9049 + 2*h.hash(t.Elem())
+
+ case *types.Struct:
+ var hash uint32 = 9059
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ f := t.Field(i)
+ if f.Anonymous() {
+ hash += 8861
+ }
+ hash += hashString(t.Tag(i))
+ hash += hashString(f.Name()) // (ignore f.Pkg)
+ hash += h.hash(f.Type())
+ }
+ return hash
+
+ case *types.Pointer:
+ return 9067 + 2*h.hash(t.Elem())
+
+ case *types.Signature:
+ var hash uint32 = 9091
+ if t.Variadic() {
+ hash *= 8863
+ }
+
+ tparams := t.TypeParams()
+ if n := tparams.Len(); n > 0 {
+ h.inGenericSig = true // affects constraints, params, and results
+
+ for i := range n {
+ tparam := tparams.At(i)
+ hash += 7 * h.hash(tparam.Constraint())
+ }
+ }
+
+ return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
+
+ case *types.Union:
+ return h.hashUnion(t)
+
+ case *types.Interface:
+ // Interfaces are identical if they have the same set of methods, with
+ // identical names and types, and they have the same set of type
+ // restrictions. See go/types.identical for more details.
+ var hash uint32 = 9103
+
+ // Hash methods.
+ for i, n := 0, t.NumMethods(); i < n; i++ {
+ // Method order is not significant.
+ // Ignore m.Pkg().
+ m := t.Method(i)
+ // Use shallow hash on method signature to
+ // avoid anonymous interface cycles.
+ hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type())
+ }
+
+ // Hash type restrictions.
+ terms, err := typeparams.InterfaceTermSet(t)
+		// If err != nil, t has invalid type restrictions.
+ if err == nil {
+ hash += h.hashTermSet(terms)
+ }
+
+ return hash
+
+ case *types.Map:
+ return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem())
+
+ case *types.Chan:
+ return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem())
+
+ case *types.Named:
+ hash := h.hashTypeName(t.Obj())
+ targs := t.TypeArgs()
+ for i := 0; i < targs.Len(); i++ {
+ targ := targs.At(i)
+ hash += 2 * h.hash(targ)
+ }
+ return hash
+
+ case *types.TypeParam:
+ return h.hashTypeParam(t)
+
+ case *types.Tuple:
+ return h.hashTuple(t)
+ }
+
+ panic(fmt.Sprintf("%T: %v", t, t))
+}
+
+func (h hasher) hashTuple(tuple *types.Tuple) uint32 {
+ // See go/types.identicalTypes for rationale.
+ n := tuple.Len()
+ hash := 9137 + 2*uint32(n)
+ for i := range n {
+ hash += 3 * h.hash(tuple.At(i).Type())
+ }
+ return hash
+}
+
+func (h hasher) hashUnion(t *types.Union) uint32 {
+ // Hash type restrictions.
+ terms, err := typeparams.UnionTermSet(t)
+	// If err != nil, t has invalid type restrictions. Fall back on a non-zero
+	// hash.
+ if err != nil {
+ return 9151
+ }
+ return h.hashTermSet(terms)
+}
+
+func (h hasher) hashTermSet(terms []*types.Term) uint32 {
+ hash := 9157 + 2*uint32(len(terms))
+ for _, term := range terms {
+ // term order is not significant.
+ termHash := h.hash(term.Type())
+ if term.Tilde() {
+ termHash *= 9161
+ }
+ hash += 3 * termHash
+ }
+ return hash
+}
+
+// hashTypeParam returns the hash of a type parameter.
+func (h hasher) hashTypeParam(t *types.TypeParam) uint32 {
+ // Within the signature of a generic function, TypeParams are
+ // identical if they have the same index and constraint, so we
+ // hash them based on index.
+ //
+ // When we are outside a generic function, free TypeParams are
+ // identical iff they are the same object, so we can use a
+ // more discriminating hash consistent with object identity.
+ // This optimization saves [Map] about 4% when hashing all the
+ // types.Info.Types in the forward closure of net/http.
+ if !h.inGenericSig {
+ // Optimization: outside a generic function signature,
+		// use a more discriminating hash consistent with object identity.
+ return h.hashTypeName(t.Obj())
+ }
+ return 9173 + 3*uint32(t.Index())
+}
+
+var theSeed = maphash.MakeSeed()
+
+// hashTypeName hashes the pointer of tname.
+func (hasher) hashTypeName(tname *types.TypeName) uint32 {
+ // Since types.Identical uses == to compare TypeNames,
+ // the Hash function uses maphash.Comparable.
+ hash := maphash.Comparable(theSeed, tname)
+ return uint32(hash ^ (hash >> 32))
+}
+
+// shallowHash computes a hash of t without looking at any of its
+// element Types, to avoid potential anonymous cycles in the types of
+// interface methods.
+//
+// When an unnamed non-empty interface type appears anywhere among the
+// arguments or results of an interface method, there is a potential
+// for endless recursion. Consider:
+//
+// type X interface { m() []*interface { X } }
+//
+// The problem is that the Methods of the interface in m's result type
+// include m itself; there is no mention of the named type X that
+// might help us break the cycle.
+// (See comment in go/types.identical, case *Interface, for more.)
+func (h hasher) shallowHash(t types.Type) uint32 {
+ // t is the type of an interface method (Signature),
+ // its params or results (Tuples), or their immediate
+ // elements (mostly Slice, Pointer, Basic, Named),
+ // so there's no need to optimize anything else.
+ switch t := t.(type) {
+ case *types.Alias:
+ return h.shallowHash(types.Unalias(t))
+
+ case *types.Signature:
+ var hash uint32 = 604171
+ if t.Variadic() {
+ hash *= 971767
+ }
+ // The Signature/Tuple recursion is always finite
+ // and invariably shallow.
+ return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results())
+
+ case *types.Tuple:
+ n := t.Len()
+ hash := 9137 + 2*uint32(n)
+ for i := range n {
+ hash += 53471161 * h.shallowHash(t.At(i).Type())
+ }
+ return hash
+
+ case *types.Basic:
+ return 45212177 * uint32(t.Kind())
+
+ case *types.Array:
+ return 1524181 + 2*uint32(t.Len())
+
+ case *types.Slice:
+ return 2690201
+
+ case *types.Struct:
+ return 3326489
+
+ case *types.Pointer:
+ return 4393139
+
+ case *types.Union:
+ return 562448657
+
+ case *types.Interface:
+ return 2124679 // no recursion here
+
+ case *types.Map:
+ return 9109
+
+ case *types.Chan:
+ return 9127
+
+ case *types.Named:
+ return h.hashTypeName(t.Obj())
+
+ case *types.TypeParam:
+ return h.hashTypeParam(t)
+ }
+ panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
new file mode 100644
index 0000000000..f7666028fe
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
@@ -0,0 +1,71 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a cache of method sets.
+
+package typeutil
+
+import (
+ "go/types"
+ "sync"
+)
+
+// A MethodSetCache records the method set of each type T for which
+// MethodSet(T) is called so that repeat queries are fast.
+// The zero value is a ready-to-use cache instance.
+type MethodSetCache struct {
+ mu sync.Mutex
+ named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
+ others map[types.Type]*types.MethodSet // all other types
+}
+
+// MethodSet returns the method set of type T. It is thread-safe.
+//
+// If cache is nil, this function is equivalent to types.NewMethodSet(T).
+// Utility functions can thus expose an optional *MethodSetCache
+// parameter to clients that care about performance.
+func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
+ if cache == nil {
+ return types.NewMethodSet(T)
+ }
+ cache.mu.Lock()
+ defer cache.mu.Unlock()
+
+ switch T := types.Unalias(T).(type) {
+ case *types.Named:
+ return cache.lookupNamed(T).value
+
+ case *types.Pointer:
+ if N, ok := types.Unalias(T.Elem()).(*types.Named); ok {
+ return cache.lookupNamed(N).pointer
+ }
+ }
+
+ // all other types
+ // (The map uses pointer equivalence, not type identity.)
+ mset := cache.others[T]
+ if mset == nil {
+ mset = types.NewMethodSet(T)
+ if cache.others == nil {
+ cache.others = make(map[types.Type]*types.MethodSet)
+ }
+ cache.others[T] = mset
+ }
+ return mset
+}
+
+func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
+ if cache.named == nil {
+ cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
+ }
+ // Avoid recomputing mset(*T) for each distinct Pointer
+ // instance whose underlying type is a named type.
+ msets, ok := cache.named[named]
+ if !ok {
+ msets.value = types.NewMethodSet(named)
+ msets.pointer = types.NewMethodSet(types.NewPointer(named))
+ cache.named[named] = msets
+ }
+ return msets
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go
new file mode 100644
index 0000000000..9dda6a25df
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go
@@ -0,0 +1,53 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+// This file defines utilities for user interfaces that display types.
+
+import (
+ "go/types"
+)
+
+// IntuitiveMethodSet returns the intuitive method set of a type T,
+// which is the set of methods you can call on an addressable value of
+// that type.
+//
+// The result always contains MethodSet(T), and is exactly MethodSet(T)
+// for interface types and for pointer-to-concrete types.
+// For all other concrete types T, the result additionally
+// contains each method belonging to *T if there is no identically
+// named method on T itself.
+//
+// This corresponds to user intuition about method sets;
+// this function is intended only for user interfaces.
+//
+// The order of the result is as for types.MethodSet(T).
+func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
+ isPointerToConcrete := func(T types.Type) bool {
+ ptr, ok := types.Unalias(T).(*types.Pointer)
+ return ok && !types.IsInterface(ptr.Elem())
+ }
+
+ var result []*types.Selection
+ mset := msets.MethodSet(T)
+ if types.IsInterface(T) || isPointerToConcrete(T) {
+ for i, n := 0, mset.Len(); i < n; i++ {
+ result = append(result, mset.At(i))
+ }
+ } else {
+ // T is some other concrete type.
+ // Report methods of T and *T, preferring those of T.
+ pmset := msets.MethodSet(types.NewPointer(T))
+ for i, n := 0, pmset.Len(); i < n; i++ {
+ meth := pmset.At(i)
+ if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
+ meth = m
+ }
+ result = append(result, meth)
+ }
+
+ }
+ return result
+}
diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go
new file mode 100644
index 0000000000..b9425f5a20
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go
@@ -0,0 +1,38 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package aliases
+
+import (
+ "go/token"
+ "go/types"
+)
+
+// Package aliases defines backward compatible shims
+// for the types.Alias type representation added in 1.22.
+// This defines placeholders for x/tools until 1.26.
+
+// NewAlias creates a new TypeName in Package pkg that
+// is an alias for the type rhs.
+//
+// The enabled parameter determines whether the resulting [TypeName]'s
+// type is an [types.Alias]. Its value must be the result of a call to
+// [Enabled], which computes the effective value of
+// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled
+// function is expensive and should be called once per task (e.g.
+// package import), not once per call to NewAlias.
+//
+// Precondition: enabled || len(tparams)==0.
+// If materialized aliases are disabled, there must not be any type parameters.
+func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName {
+ if enabled {
+ tname := types.NewTypeName(pos, pkg, name, nil)
+ SetTypeParams(types.NewAlias(tname, rhs), tparams)
+ return tname
+ }
+ if len(tparams) > 0 {
+ panic("cannot create an alias with type parameters when gotypesalias is not enabled")
+ }
+ return types.NewTypeName(pos, pkg, name, rhs)
+}
diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
new file mode 100644
index 0000000000..7716a3331d
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
@@ -0,0 +1,80 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package aliases
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+)
+
+// Rhs returns the type on the right-hand side of the alias declaration.
+func Rhs(alias *types.Alias) types.Type {
+ if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok {
+ return alias.Rhs() // go1.23+
+ }
+
+ // go1.22's Alias didn't have the Rhs method,
+ // so Unalias is the best we can do.
+ return types.Unalias(alias)
+}
+
+// TypeParams returns the type parameter list of the alias.
+func TypeParams(alias *types.Alias) *types.TypeParamList {
+ if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok {
+ return alias.TypeParams() // go1.23+
+ }
+ return nil
+}
+
+// SetTypeParams sets the type parameters of the alias type.
+func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) {
+ if alias, ok := any(alias).(interface {
+ SetTypeParams(tparams []*types.TypeParam)
+ }); ok {
+ alias.SetTypeParams(tparams) // go1.23+
+ } else if len(tparams) > 0 {
+ panic("cannot set type parameters of an Alias type in go1.22")
+ }
+}
+
+// TypeArgs returns the type arguments used to instantiate the Alias type.
+func TypeArgs(alias *types.Alias) *types.TypeList {
+ if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok {
+ return alias.TypeArgs() // go1.23+
+ }
+ return nil // empty (go1.22)
+}
+
+// Origin returns the generic Alias type of which alias is an instance.
+// If alias is not an instance of a generic alias, Origin returns alias.
+func Origin(alias *types.Alias) *types.Alias {
+ if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok {
+ return alias.Origin() // go1.23+
+ }
+ return alias // not an instance of a generic alias (go1.22)
+}
+
+// Enabled reports whether [NewAlias] should create [types.Alias] types.
+//
+// This function is expensive! Call it sparingly.
+func Enabled() bool {
+ // The only reliable way to compute the answer is to invoke go/types.
+ // We don't parse the GODEBUG environment variable, because
+ // (a) it's tricky to do so in a manner that is consistent
+ // with the godebug package; in particular, a simple
+ // substring check is not good enough. The value is a
+ // rightmost-wins list of options. But more importantly:
+ // (b) it is impossible to detect changes to the effective
+ // setting caused by os.Setenv("GODEBUG"), as happens in
+ // many tests. Therefore any attempt to cache the result
+ // is just incorrect.
+ fset := token.NewFileSet()
+ f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution)
+ pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
+ _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias)
+ return enabled
+}
diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go
new file mode 100644
index 0000000000..ade5d1e799
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/core/event.go
@@ -0,0 +1,80 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package core provides support for event based telemetry.
+package core
+
+import (
+ "fmt"
+ "time"
+
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Event holds the information about an event of note that occurred.
+type Event struct {
+ at time.Time
+
+ // As events are often on the stack, storing the first few labels directly
+ // in the event can avoid an allocation at all for the very common cases of
+ // simple events.
+ // The length needs to be large enough to cope with the majority of events
+	// but not so large as to cause undue stack pressure.
+ // A log message with two values will use 3 labels (one for each value and
+ // one for the message itself).
+
+ static [3]label.Label // inline storage for the first few labels
+ dynamic []label.Label // dynamically sized storage for remaining labels
+}
+
+func (ev Event) At() time.Time { return ev.at }
+
+func (ev Event) Format(f fmt.State, r rune) {
+ if !ev.at.IsZero() {
+ fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 "))
+ }
+ for index := 0; ev.Valid(index); index++ {
+ if l := ev.Label(index); l.Valid() {
+ fmt.Fprintf(f, "\n\t%v", l)
+ }
+ }
+}
+
+func (ev Event) Valid(index int) bool {
+ return index >= 0 && index < len(ev.static)+len(ev.dynamic)
+}
+
+func (ev Event) Label(index int) label.Label {
+ if index < len(ev.static) {
+ return ev.static[index]
+ }
+ return ev.dynamic[index-len(ev.static)]
+}
+
+func (ev Event) Find(key label.Key) label.Label {
+ for _, l := range ev.static {
+ if l.Key() == key {
+ return l
+ }
+ }
+ for _, l := range ev.dynamic {
+ if l.Key() == key {
+ return l
+ }
+ }
+ return label.Label{}
+}
+
+func MakeEvent(static [3]label.Label, labels []label.Label) Event {
+ return Event{
+ static: static,
+ dynamic: labels,
+ }
+}
+
+// CloneEvent returns a copy of the event with the time adjusted to at.
+func CloneEvent(ev Event, at time.Time) Event {
+ ev.at = at
+ return ev
+}
diff --git a/vendor/golang.org/x/tools/internal/event/core/export.go b/vendor/golang.org/x/tools/internal/event/core/export.go
new file mode 100644
index 0000000000..05f3a9a579
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/core/export.go
@@ -0,0 +1,70 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+ "unsafe"
+
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Exporter is a function that handles events.
+// It may return a modified context and event.
+type Exporter func(context.Context, Event, label.Map) context.Context
+
+var (
+ exporter unsafe.Pointer
+)
+
+// SetExporter sets the global exporter function that handles all events.
+// The exporter is called synchronously from the event call site, so it should
+// return quickly so as not to hold up user code.
+func SetExporter(e Exporter) {
+ p := unsafe.Pointer(&e)
+ if e == nil {
+ // &e is always valid, and so p is always valid, but for the early abort
+ // of ProcessEvent to be efficient it needs to make the nil check on the
+ // pointer without having to dereference it, so we make the nil function
+ // also a nil pointer
+ p = nil
+ }
+ atomic.StorePointer(&exporter, p)
+}
+
+// deliver is called to deliver an event to the supplied exporter.
+// It will fill in the time.
+func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context {
+ // add the current time to the event
+ ev.at = time.Now()
+ // hand the event off to the current exporter
+ return exporter(ctx, ev, ev)
+}
+
+// Export is called to deliver an event to the global exporter if set.
+func Export(ctx context.Context, ev Event) context.Context {
+ // get the global exporter and abort early if there is not one
+ exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+ if exporterPtr == nil {
+ return ctx
+ }
+ return deliver(ctx, *exporterPtr, ev)
+}
+
+// ExportPair is called to deliver a start event to the supplied exporter.
+// It also returns a function that will deliver the end event to the same
+// exporter.
+// It will fill in the time.
+func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) {
+ // get the global exporter and abort early if there is not one
+ exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+ if exporterPtr == nil {
+ return ctx, func() {}
+ }
+ ctx = deliver(ctx, *exporterPtr, begin)
+ return ctx, func() { deliver(ctx, *exporterPtr, end) }
+}
diff --git a/vendor/golang.org/x/tools/internal/event/core/fast.go b/vendor/golang.org/x/tools/internal/event/core/fast.go
new file mode 100644
index 0000000000..06c1d4615e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/core/fast.go
@@ -0,0 +1,77 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+ "context"
+
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Log1 takes a message and one label delivers a log event to the exporter.
+// It is a customized version of Print that is faster and does no allocation.
+func Log1(ctx context.Context, message string, t1 label.Label) {
+ Export(ctx, MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ t1,
+ }, nil))
+}
+
+// Log2 takes a message and two labels and delivers a log event to the exporter.
+// It is a customized version of Print that is faster and does no allocation.
+func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) {
+ Export(ctx, MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ t1,
+ t2,
+ }, nil))
+}
+
+// Metric1 sends a label event to the exporter with the supplied labels.
+func Metric1(ctx context.Context, t1 label.Label) context.Context {
+ return Export(ctx, MakeEvent([3]label.Label{
+ keys.Metric.New(),
+ t1,
+ }, nil))
+}
+
+// Metric2 sends a label event to the exporter with the supplied labels.
+func Metric2(ctx context.Context, t1, t2 label.Label) context.Context {
+ return Export(ctx, MakeEvent([3]label.Label{
+ keys.Metric.New(),
+ t1,
+ t2,
+ }, nil))
+}
+
+// Start1 sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) {
+ return ExportPair(ctx,
+ MakeEvent([3]label.Label{
+ keys.Start.Of(name),
+ t1,
+ }, nil),
+ MakeEvent([3]label.Label{
+ keys.End.New(),
+ }, nil))
+}
+
+// Start2 sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) {
+ return ExportPair(ctx,
+ MakeEvent([3]label.Label{
+ keys.Start.Of(name),
+ t1,
+ t2,
+ }, nil),
+ MakeEvent([3]label.Label{
+ keys.End.New(),
+ }, nil))
+}
diff --git a/vendor/golang.org/x/tools/internal/event/doc.go b/vendor/golang.org/x/tools/internal/event/doc.go
new file mode 100644
index 0000000000..5dc6e6babe
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/doc.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package event provides a set of packages that cover the main
+// concepts of telemetry in an implementation agnostic way.
+package event
diff --git a/vendor/golang.org/x/tools/internal/event/event.go b/vendor/golang.org/x/tools/internal/event/event.go
new file mode 100644
index 0000000000..4d55e577d1
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/event.go
@@ -0,0 +1,127 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package event
+
+import (
+ "context"
+
+ "golang.org/x/tools/internal/event/core"
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Exporter is a function that handles events.
+// It may return a modified context and event.
+type Exporter func(context.Context, core.Event, label.Map) context.Context
+
+// SetExporter sets the global exporter function that handles all events.
+// The exporter is called synchronously from the event call site, so it should
+// return quickly so as not to hold up user code.
+func SetExporter(e Exporter) {
+ core.SetExporter(core.Exporter(e))
+}
+
+// Log takes a message and a label list and combines them into a single event
+// before delivering them to the exporter.
+func Log(ctx context.Context, message string, labels ...label.Label) {
+ core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ }, labels))
+}
+
+// IsLog returns true if the event was built by the Log function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsLog(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Msg
+}
+
+// Error takes a message and a label list and combines them into a single event
+// before delivering them to the exporter. It captures the error in the
+// delivered event.
+func Error(ctx context.Context, message string, err error, labels ...label.Label) {
+ core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ keys.Err.Of(err),
+ }, labels))
+}
+
+// IsError returns true if the event was built by the Error function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsError(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Msg &&
+ ev.Label(1).Key() == keys.Err
+}
+
+// Metric sends a label event to the exporter with the supplied labels.
+func Metric(ctx context.Context, labels ...label.Label) {
+ core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Metric.New(),
+ }, labels))
+}
+
+// IsMetric returns true if the event was built by the Metric function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsMetric(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Metric
+}
+
+// Label sends a label event to the exporter with the supplied labels.
+func Label(ctx context.Context, labels ...label.Label) context.Context {
+ return core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Label.New(),
+ }, labels))
+}
+
+// IsLabel returns true if the event was built by the Label function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsLabel(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Label
+}
+
+// Start sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) {
+ return core.ExportPair(ctx,
+ core.MakeEvent([3]label.Label{
+ keys.Start.Of(name),
+ }, labels),
+ core.MakeEvent([3]label.Label{
+ keys.End.New(),
+ }, nil))
+}
+
+// IsStart returns true if the event was built by the Start function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsStart(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Start
+}
+
+// IsEnd returns true if the event was built by the End function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsEnd(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.End
+}
+
+// Detach returns a context without an associated span.
+// This allows the creation of spans that are not children of the current span.
+func Detach(ctx context.Context) context.Context {
+ return core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Detach.New(),
+ }, nil))
+}
+
+// IsDetach returns true if the event was built by the Detach function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsDetach(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Detach
+}
diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go
new file mode 100644
index 0000000000..4cfa51b612
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go
@@ -0,0 +1,564 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Value represents a key for untyped values.
+type Value struct {
+ name string
+ description string
+}
+
+// New creates a new Key for untyped values.
+func New(name, description string) *Value {
+ return &Value{name: name, description: description}
+}
+
+func (k *Value) Name() string { return k.name }
+func (k *Value) Description() string { return k.description }
+
+func (k *Value) Format(w io.Writer, buf []byte, l label.Label) {
+ fmt.Fprint(w, k.From(l))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Value) Get(lm label.Map) any {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return nil
+}
+
+// From can be used to get a value from a Label.
+func (k *Value) From(t label.Label) any { return t.UnpackValue() }
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Value) Of(value any) label.Label { return label.OfValue(k, value) }
+
+// Tag represents a key for tagging labels that have no value.
+// These are used when the existence of the label is the entire information it
+// carries, such as marking events to be of a specific kind, or from a specific
+// package.
+type Tag struct {
+ name string
+ description string
+}
+
+// NewTag creates a new Key for tagging labels.
+func NewTag(name, description string) *Tag {
+ return &Tag{name: name, description: description}
+}
+
+func (k *Tag) Name() string { return k.name }
+func (k *Tag) Description() string { return k.description }
+
+func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {}
+
+// New creates a new Label with this key.
+func (k *Tag) New() label.Label { return label.OfValue(k, nil) }
+
+// Int represents a key
+type Int struct {
+ name string
+ description string
+}
+
+// NewInt creates a new Key for int values.
+func NewInt(name, description string) *Int {
+ return &Int{name: name, description: description}
+}
+
+func (k *Int) Name() string { return k.name }
+func (k *Int) Description() string { return k.description }
+
+func (k *Int) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int) Get(lm label.Map) int {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int) From(t label.Label) int { return int(t.Unpack64()) }
+
+// Int8 represents a key
+type Int8 struct {
+ name string
+ description string
+}
+
+// NewInt8 creates a new Key for int8 values.
+func NewInt8(name, description string) *Int8 {
+ return &Int8{name: name, description: description}
+}
+
+func (k *Int8) Name() string { return k.name }
+func (k *Int8) Description() string { return k.description }
+
+func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int8) Get(lm label.Map) int8 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) }
+
+// Int16 represents a key
+type Int16 struct {
+ name string
+ description string
+}
+
+// NewInt16 creates a new Key for int16 values.
+func NewInt16(name, description string) *Int16 {
+ return &Int16{name: name, description: description}
+}
+
+func (k *Int16) Name() string { return k.name }
+func (k *Int16) Description() string { return k.description }
+
+func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int16) Get(lm label.Map) int16 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) }
+
+// Int32 represents a key
+type Int32 struct {
+ name string
+ description string
+}
+
+// NewInt32 creates a new Key for int32 values.
+func NewInt32(name, description string) *Int32 {
+ return &Int32{name: name, description: description}
+}
+
+func (k *Int32) Name() string { return k.name }
+func (k *Int32) Description() string { return k.description }
+
+func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int32) Get(lm label.Map) int32 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) }
+
+// Int64 represents a key
+type Int64 struct {
+ name string
+ description string
+}
+
+// NewInt64 creates a new Key for int64 values.
+func NewInt64(name, description string) *Int64 {
+ return &Int64{name: name, description: description}
+}
+
+func (k *Int64) Name() string { return k.name }
+func (k *Int64) Description() string { return k.description }
+
+func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, k.From(l), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int64) Get(lm label.Map) int64 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) }
+
+// UInt represents a key
+type UInt struct {
+ name string
+ description string
+}
+
+// NewUInt creates a new Key for uint values.
+func NewUInt(name, description string) *UInt {
+ return &UInt{name: name, description: description}
+}
+
+func (k *UInt) Name() string { return k.name }
+func (k *UInt) Description() string { return k.description }
+
+func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt) Get(lm label.Map) uint {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) }
+
+// UInt8 represents a key
+type UInt8 struct {
+ name string
+ description string
+}
+
+// NewUInt8 creates a new Key for uint8 values.
+func NewUInt8(name, description string) *UInt8 {
+ return &UInt8{name: name, description: description}
+}
+
+func (k *UInt8) Name() string { return k.name }
+func (k *UInt8) Description() string { return k.description }
+
+func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt8) Get(lm label.Map) uint8 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) }
+
+// UInt16 represents a key
+type UInt16 struct {
+ name string
+ description string
+}
+
+// NewUInt16 creates a new Key for uint16 values.
+func NewUInt16(name, description string) *UInt16 {
+ return &UInt16{name: name, description: description}
+}
+
+func (k *UInt16) Name() string { return k.name }
+func (k *UInt16) Description() string { return k.description }
+
+func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt16) Get(lm label.Map) uint16 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) }
+
+// UInt32 represents a key
+type UInt32 struct {
+ name string
+ description string
+}
+
+// NewUInt32 creates a new Key for uint32 values.
+func NewUInt32(name, description string) *UInt32 {
+ return &UInt32{name: name, description: description}
+}
+
+func (k *UInt32) Name() string { return k.name }
+func (k *UInt32) Description() string { return k.description }
+
+func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt32) Get(lm label.Map) uint32 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) }
+
+// UInt64 represents a key
+type UInt64 struct {
+ name string
+ description string
+}
+
+// NewUInt64 creates a new Key for uint64 values.
+func NewUInt64(name, description string) *UInt64 {
+ return &UInt64{name: name, description: description}
+}
+
+func (k *UInt64) Name() string { return k.name }
+func (k *UInt64) Description() string { return k.description }
+
+func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, k.From(l), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt64) Get(lm label.Map) uint64 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() }
+
+// Float32 represents a key
+type Float32 struct {
+ name string
+ description string
+}
+
+// NewFloat32 creates a new Key for float32 values.
+func NewFloat32(name, description string) *Float32 {
+ return &Float32{name: name, description: description}
+}
+
+func (k *Float32) Name() string { return k.name }
+func (k *Float32) Description() string { return k.description }
+
+func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float32) Of(v float32) label.Label {
+ return label.Of64(k, uint64(math.Float32bits(v)))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float32) Get(lm label.Map) float32 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float32) From(t label.Label) float32 {
+ return math.Float32frombits(uint32(t.Unpack64()))
+}
+
+// Float64 represents a key
+type Float64 struct {
+ name string
+ description string
+}
+
+// NewFloat64 creates a new Key for int64 values.
+func NewFloat64(name, description string) *Float64 {
+ return &Float64{name: name, description: description}
+}
+
+func (k *Float64) Name() string { return k.name }
+func (k *Float64) Description() string { return k.description }
+
+func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float64) Of(v float64) label.Label {
+ return label.Of64(k, math.Float64bits(v))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float64) Get(lm label.Map) float64 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float64) From(t label.Label) float64 {
+ return math.Float64frombits(t.Unpack64())
+}
+
+// String represents a key
+type String struct {
+ name string
+ description string
+}
+
+// NewString creates a new Key for int64 values.
+func NewString(name, description string) *String {
+ return &String{name: name, description: description}
+}
+
+func (k *String) Name() string { return k.name }
+func (k *String) Description() string { return k.description }
+
+func (k *String) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendQuote(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *String) Of(v string) label.Label { return label.OfString(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *String) Get(lm label.Map) string {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return ""
+}
+
+// From can be used to get a value from a Label.
+func (k *String) From(t label.Label) string { return t.UnpackString() }
+
+// Boolean represents a key
+type Boolean struct {
+ name string
+ description string
+}
+
+// NewBoolean creates a new Key for bool values.
+func NewBoolean(name, description string) *Boolean {
+ return &Boolean{name: name, description: description}
+}
+
+func (k *Boolean) Name() string { return k.name }
+func (k *Boolean) Description() string { return k.description }
+
+func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendBool(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Boolean) Of(v bool) label.Label {
+ if v {
+ return label.Of64(k, 1)
+ }
+ return label.Of64(k, 0)
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Boolean) Get(lm label.Map) bool {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return false
+}
+
+// From can be used to get a value from a Label.
+func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 }
+
+// Error represents a key
+type Error struct {
+ name string
+ description string
+}
+
+// NewError creates a new Key for int64 values.
+func NewError(name, description string) *Error {
+ return &Error{name: name, description: description}
+}
+
+func (k *Error) Name() string { return k.name }
+func (k *Error) Description() string { return k.description }
+
+func (k *Error) Format(w io.Writer, buf []byte, l label.Label) {
+ io.WriteString(w, k.From(l).Error())
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Error) Get(lm label.Map) error {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return nil
+}
+
+// From can be used to get a value from a Label.
+func (k *Error) From(t label.Label) error {
+ err, _ := t.UnpackValue().(error)
+ return err
+}
diff --git a/vendor/golang.org/x/tools/internal/event/keys/standard.go b/vendor/golang.org/x/tools/internal/event/keys/standard.go
new file mode 100644
index 0000000000..7e95866592
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/keys/standard.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+var (
+ // Msg is a key used to add message strings to label lists.
+ Msg = NewString("message", "a readable message")
+ // Label is a key used to indicate an event adds labels to the context.
+ Label = NewTag("label", "a label context marker")
+ // Start is used for things like traces that have a name.
+ Start = NewString("start", "span start")
+ // Metric is a key used to indicate an event records metrics.
+ End = NewTag("end", "a span end marker")
+ // Metric is a key used to indicate an event records metrics.
+ Detach = NewTag("detach", "a span detach marker")
+ // Err is a key used to add error values to label lists.
+ Err = NewError("error", "an error that occurred")
+ // Metric is a key used to indicate an event records metrics.
+ Metric = NewTag("metric", "a metric event marker")
+)
diff --git a/vendor/golang.org/x/tools/internal/event/keys/util.go b/vendor/golang.org/x/tools/internal/event/keys/util.go
new file mode 100644
index 0000000000..c0e8e731c9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/keys/util.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+import (
+ "sort"
+ "strings"
+)
+
+// Join returns a canonical join of the keys in S:
+// a sorted comma-separated string list.
+func Join[S ~[]T, T ~string](s S) string {
+ strs := make([]string, 0, len(s))
+ for _, v := range s {
+ strs = append(strs, string(v))
+ }
+ sort.Strings(strs)
+ return strings.Join(strs, ",")
+}
diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go
new file mode 100644
index 0000000000..92a3910573
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/label/label.go
@@ -0,0 +1,214 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package label
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "slices"
+ "unsafe"
+)
+
+// Key is used as the identity of a Label.
+// Keys are intended to be compared by pointer only, the name should be unique
+// for communicating with external systems, but it is not required or enforced.
+type Key interface {
+ // Name returns the key name.
+ Name() string
+ // Description returns a string that can be used to describe the value.
+ Description() string
+
+ // Format is used in formatting to append the value of the label to the
+ // supplied buffer.
+ // The formatter may use the supplied buf as a scratch area to avoid
+ // allocations.
+ Format(w io.Writer, buf []byte, l Label)
+}
+
+// Label holds a key and value pair.
+// It is normally used when passing around lists of labels.
+type Label struct {
+ key Key
+ packed uint64
+ untyped any
+}
+
+// Map is the interface to a collection of Labels indexed by key.
+type Map interface {
+ // Find returns the label that matches the supplied key.
+ Find(key Key) Label
+}
+
+// List is the interface to something that provides an iterable
+// list of labels.
+// Iteration should start from 0 and continue until Valid returns false.
+type List interface {
+ // Valid returns true if the index is within range for the list.
+ // It does not imply the label at that index will itself be valid.
+ Valid(index int) bool
+ // Label returns the label at the given index.
+ Label(index int) Label
+}
+
+// list implements LabelList for a list of Labels.
+type list struct {
+ labels []Label
+}
+
+// filter wraps a LabelList filtering out specific labels.
+type filter struct {
+ keys []Key
+ underlying List
+}
+
+// listMap implements LabelMap for a simple list of labels.
+type listMap struct {
+ labels []Label
+}
+
+// mapChain implements LabelMap for a list of underlying LabelMap.
+type mapChain struct {
+ maps []Map
+}
+
+// OfValue creates a new label from the key and value.
+// This method is for implementing new key types, label creation should
+// normally be done with the Of method of the key.
+func OfValue(k Key, value any) Label { return Label{key: k, untyped: value} }
+
+// UnpackValue assumes the label was built using LabelOfValue and returns the value
+// that was passed to that constructor.
+// This method is for implementing new key types, for type safety normal
+// access should be done with the From method of the key.
+func (t Label) UnpackValue() any { return t.untyped }
+
+// Of64 creates a new label from a key and a uint64. This is often
+// used for non uint64 values that can be packed into a uint64.
+// This method is for implementing new key types, label creation should
+// normally be done with the Of method of the key.
+func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} }
+
+// Unpack64 assumes the label was built using LabelOf64 and returns the value that
+// was passed to that constructor.
+// This method is for implementing new key types, for type safety normal
+// access should be done with the From method of the key.
+func (t Label) Unpack64() uint64 { return t.packed }
+
+type stringptr unsafe.Pointer
+
+// OfString creates a new label from a key and a string.
+// This method is for implementing new key types, label creation should
+// normally be done with the Of method of the key.
+func OfString(k Key, v string) Label {
+ hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
+ return Label{
+ key: k,
+ packed: uint64(hdr.Len),
+ untyped: stringptr(hdr.Data),
+ }
+}
+
+// UnpackString assumes the label was built using LabelOfString and returns the
+// value that was passed to that constructor.
+// This method is for implementing new key types, for type safety normal
+// access should be done with the From method of the key.
+func (t Label) UnpackString() string {
+ var v string
+ hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
+ hdr.Data = uintptr(t.untyped.(stringptr))
+ hdr.Len = int(t.packed)
+ return v
+}
+
+// Valid returns true if the Label is a valid one (it has a key).
+func (t Label) Valid() bool { return t.key != nil }
+
+// Key returns the key of this Label.
+func (t Label) Key() Key { return t.key }
+
+// Format is used for debug printing of labels.
+func (t Label) Format(f fmt.State, r rune) {
+ if !t.Valid() {
+ io.WriteString(f, `nil`)
+ return
+ }
+ io.WriteString(f, t.Key().Name())
+ io.WriteString(f, "=")
+ var buf [128]byte
+ t.Key().Format(f, buf[:0], t)
+}
+
+func (l *list) Valid(index int) bool {
+ return index >= 0 && index < len(l.labels)
+}
+
+func (l *list) Label(index int) Label {
+ return l.labels[index]
+}
+
+func (f *filter) Valid(index int) bool {
+ return f.underlying.Valid(index)
+}
+
+func (f *filter) Label(index int) Label {
+ l := f.underlying.Label(index)
+ if slices.Contains(f.keys, l.Key()) {
+ return Label{}
+ }
+ return l
+}
+
+func (lm listMap) Find(key Key) Label {
+ for _, l := range lm.labels {
+ if l.Key() == key {
+ return l
+ }
+ }
+ return Label{}
+}
+
+func (c mapChain) Find(key Key) Label {
+ for _, src := range c.maps {
+ l := src.Find(key)
+ if l.Valid() {
+ return l
+ }
+ }
+ return Label{}
+}
+
+var emptyList = &list{}
+
+func NewList(labels ...Label) List {
+ if len(labels) == 0 {
+ return emptyList
+ }
+ return &list{labels: labels}
+}
+
+func Filter(l List, keys ...Key) List {
+ if len(keys) == 0 {
+ return l
+ }
+ return &filter{keys: keys, underlying: l}
+}
+
+func NewMap(labels ...Label) Map {
+ return listMap{labels: labels}
+}
+
+func MergeMaps(srcs ...Map) Map {
+ var nonNil []Map
+ for _, src := range srcs {
+ if src != nil {
+ nonNil = append(nonNil, src)
+ }
+ }
+ if len(nonNil) == 1 {
+ return nonNil[0]
+ }
+ return mapChain{maps: nonNil}
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
new file mode 100644
index 0000000000..734c46198d
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
@@ -0,0 +1,89 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the remaining vestiges of
+// $GOROOT/src/go/internal/gcimporter/bimport.go.
+
+package gcimporter
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "sync"
+)
+
+func errorf(format string, args ...any) {
+ panic(fmt.Sprintf(format, args...))
+}
+
+const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
+
+// Synthesize a token.Pos
+type fakeFileSet struct {
+ fset *token.FileSet
+ files map[string]*fileInfo
+}
+
+type fileInfo struct {
+ file *token.File
+ lastline int
+}
+
+const maxlines = 64 * 1024
+
+func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
+ // TODO(mdempsky): Make use of column.
+
+ // Since we don't know the set of needed file positions, we reserve maxlines
+ // positions per file. We delay calling token.File.SetLines until all
+ // positions have been calculated (by way of fakeFileSet.setLines), so that
+ // we can avoid setting unnecessary lines. See also golang/go#46586.
+ f := s.files[file]
+ if f == nil {
+ f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)}
+ s.files[file] = f
+ }
+ if line > maxlines {
+ line = 1
+ }
+ if line > f.lastline {
+ f.lastline = line
+ }
+
+ // Return a fake position assuming that f.file consists only of newlines.
+ return token.Pos(f.file.Base() + line - 1)
+}
+
+func (s *fakeFileSet) setLines() {
+ fakeLinesOnce.Do(func() {
+ fakeLines = make([]int, maxlines)
+ for i := range fakeLines {
+ fakeLines[i] = i
+ }
+ })
+ for _, f := range s.files {
+ f.file.SetLines(fakeLines[:f.lastline])
+ }
+}
+
+var (
+ fakeLines []int
+ fakeLinesOnce sync.Once
+)
+
+func chanDir(d int) types.ChanDir {
+ // tag values must match the constants in cmd/compile/internal/gc/go.go
+ switch d {
+ case 1 /* Crecv */ :
+ return types.RecvOnly
+ case 2 /* Csend */ :
+ return types.SendOnly
+ case 3 /* Cboth */ :
+ return types.SendRecv
+ default:
+ errorf("unexpected channel dir %d", d)
+ return 0
+ }
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
new file mode 100644
index 0000000000..5662a311da
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
@@ -0,0 +1,421 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go.
+// This file also additionally implements FindExportData for gcexportdata.NewReader.
+
+package gcimporter
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "go/build"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+)
+
+// FindExportData positions the reader r at the beginning of the
+// export data section of an underlying cmd/compile created archive
+// file by reading from it. The reader must be positioned at the
+// start of the file before calling this function.
+// This returns the length of the export data in bytes.
+//
+// This function is needed by [gcexportdata.Read], which must
+// accept inputs produced by the last two releases of cmd/compile,
+// plus tip.
+func FindExportData(r *bufio.Reader) (size int64, err error) {
+ arsize, err := FindPackageDefinition(r)
+ if err != nil {
+ return
+ }
+ size = int64(arsize)
+
+ objapi, headers, err := ReadObjectHeaders(r)
+ if err != nil {
+ return
+ }
+ size -= int64(len(objapi))
+ for _, h := range headers {
+ size -= int64(len(h))
+ }
+
+ // Check for the binary export data section header "$$B\n".
+ // TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+ hdr := string(line)
+ if hdr != "$$B\n" {
+ err = fmt.Errorf("unknown export data header: %q", hdr)
+ return
+ }
+ size -= int64(len(hdr))
+
+ // For files with a binary export data header "$$B\n",
+ // these are always terminated by an end-of-section marker "\n$$\n".
+ // So the last bytes must always be this constant.
+ //
+ // The end-of-section marker is not a part of the export data itself.
+ // Do not include these in size.
+ //
+ // It would be nice to have sanity check that the final bytes after
+ // the export data are indeed the end-of-section marker. The split
+ // of gcexportdata.NewReader and gcexportdata.Read make checking this
+ // ugly so gcimporter gives up enforcing this. The compiler and go/types
+ // importer do enforce this, which seems good enough.
+ const endofsection = "\n$$\n"
+ size -= int64(len(endofsection))
+
+ if size < 0 {
+ err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
+ return
+ }
+
+ return
+}
+
+// ReadUnified reads the contents of the unified export data from a reader r
+// that contains the contents of a GC-created archive file.
+//
+// On success, the reader will be positioned after the end-of-section marker "\n$$\n".
+//
+// Supported GC-created archive files have 4 layers of nesting:
+// - An archive file containing a package definition file.
+// - The package definition file contains headers followed by a data section.
+// Headers are lines (≤ 4kb) that do not start with "$$".
+// - The data section starts with "$$B\n" followed by export data followed
+// by an end of section marker "\n$$\n". (The section start "$$\n" is no
+// longer supported.)
+// - The export data starts with a format byte ('u') followed by the export
+// data in the given format. (See ReadExportDataHeader for older formats.)
+//
+// Putting this together, the bytes in a GC-created archive files are expected
+// to look like the following.
+// See cmd/internal/archive for more details on ar file headers.
+//
+// | \n | ar file signature
+// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size.
+// | go object <...>\n | objabi header
+// | \n | other headers such as build id
+// | $$B\n | binary format marker
+// | u\n | unified export
+// | $$\n | end-of-section marker
+// | [optional padding] | padding byte (0x0A) if size is odd
+// | [ar file header] | other ar files
+// | [ar file data] |
+func ReadUnified(r *bufio.Reader) (data []byte, err error) {
+ // We historically guaranteed headers at the default buffer size (4096) work.
+ // This ensures we can use ReadSlice throughout.
+ const minBufferSize = 4096
+ r = bufio.NewReaderSize(r, minBufferSize)
+
+ size, err := FindPackageDefinition(r)
+ if err != nil {
+ return
+ }
+ n := size
+
+ objapi, headers, err := ReadObjectHeaders(r)
+ if err != nil {
+ return
+ }
+ n -= len(objapi)
+ for _, h := range headers {
+ n -= len(h)
+ }
+
+ hdrlen, err := ReadExportDataHeader(r)
+ if err != nil {
+ return
+ }
+ n -= hdrlen
+
+ // size also includes the end of section marker. Remove that many bytes from the end.
+ const marker = "\n$$\n"
+ n -= len(marker)
+
+ if n < 0 {
+ err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n)
+ return
+ }
+
+ // Read n bytes from buf.
+ data = make([]byte, n)
+ _, err = io.ReadFull(r, data)
+ if err != nil {
+ return
+ }
+
+ // Check for marker at the end.
+ var suffix [len(marker)]byte
+ _, err = io.ReadFull(r, suffix[:])
+ if err != nil {
+ return
+ }
+ if s := string(suffix[:]); s != marker {
+ err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker)
+ return
+ }
+
+ return
+}
+
+// FindPackageDefinition positions the reader r at the beginning of a package
+// definition file ("__.PKGDEF") within a GC-created archive by reading
+// from it, and returns the size of the package definition file in the archive.
+//
+// The reader must be positioned at the start of the archive file before calling
+// this function, and "__.PKGDEF" is assumed to be the first file in the archive.
+//
+// See cmd/internal/archive for details on the archive format.
+func FindPackageDefinition(r *bufio.Reader) (size int, err error) {
+ // Uses ReadSlice to limit risk of malformed inputs.
+
+ // Read first line to make sure this is an object file.
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+
+ // Is the first line an archive file signature?
+ if string(line) != "!\n" {
+ err = fmt.Errorf("not the start of an archive file (%q)", line)
+ return
+ }
+
+ // package export block should be first
+ size = readArchiveHeader(r, "__.PKGDEF")
+ if size <= 0 {
+ err = fmt.Errorf("not a package file")
+ return
+ }
+
+ return
+}
+
+// ReadObjectHeaders reads object headers from the reader. Object headers are
+// lines that do not start with an end-of-section marker "$$". The first header
+// is the objabi header. On success, the reader will be positioned at the beginning
+// of the end-of-section marker.
+//
+// It returns an error if any header does not fit in r.Size() bytes.
+func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) {
+ // line is a temporary buffer for headers.
+ // Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs.
+ var line []byte
+
+ // objapi header should be the first line
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ objapi = string(line)
+
+ // objapi header begins with "go object ".
+ if !strings.HasPrefix(objapi, "go object ") {
+ err = fmt.Errorf("not a go object file: %s", objapi)
+ return
+ }
+
+ // process remaining object header lines
+ for {
+ // check for an end of section marker "$$"
+ line, err = r.Peek(2)
+ if err != nil {
+ return
+ }
+ if string(line) == "$$" {
+ return // stop
+ }
+
+ // read next header
+ line, err = r.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+ headers = append(headers, string(line))
+ }
+}
+
+// ReadExportDataHeader reads the export data header and format from r.
+// It returns the number of bytes read, or an error if the format is no longer
+// supported or it failed to read.
+//
+// The only currently supported format is binary export data in the
+// unified export format.
+func ReadExportDataHeader(r *bufio.Reader) (n int, err error) {
+ // Read export data header.
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+
+ hdr := string(line)
+ switch hdr {
+ case "$$\n":
+ err = fmt.Errorf("old textual export format no longer supported (recompile package)")
+ return
+
+ case "$$B\n":
+ var format byte
+ format, err = r.ReadByte()
+ if err != nil {
+ return
+ }
+ // The unified export format starts with a 'u'.
+ switch format {
+ case 'u':
+ default:
+ // Older no longer supported export formats include:
+ // indexed export format which started with an 'i'; and
+ // the older binary export format which started with a 'c',
+ // 'd', or 'v' (from "version").
+ err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format)
+ return
+ }
+
+ default:
+ err = fmt.Errorf("unknown export data header: %q", hdr)
+ return
+ }
+
+ n = len(hdr) + 1 // + 1 is for 'u'
+ return
+}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context). A relative srcDir is interpreted
+// relative to the current working directory.
+//
+// FindPkg is only used in tests within x/tools.
+func FindPkg(path, srcDir string) (filename, id string, err error) {
+ // TODO(taking): Move internal/exportdata.FindPkg into its own file,
+ // and then this copy into a _test package.
+ if path == "" {
+ return "", "", errors.New("path is empty")
+ }
+
+ var noext string
+ switch {
+ default:
+ // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+ // Don't require the source files to be present.
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
+ srcDir = abs
+ }
+ var bp *build.Package
+ bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+ if bp.PkgObj == "" {
+ if bp.Goroot && bp.Dir != "" {
+ filename, err = lookupGorootExport(bp.Dir)
+ if err == nil {
+ _, err = os.Stat(filename)
+ }
+ if err == nil {
+ return filename, bp.ImportPath, nil
+ }
+ }
+ goto notfound
+ } else {
+ noext = strings.TrimSuffix(bp.PkgObj, ".a")
+ }
+ id = bp.ImportPath
+
+ case build.IsLocalImport(path):
+ // "./x" -> "/this/directory/x.ext", "/this/directory/x"
+ noext = filepath.Join(srcDir, path)
+ id = noext
+
+ case filepath.IsAbs(path):
+ // for completeness only - go/build.Import
+ // does not support absolute imports
+ // "/x" -> "/x.ext", "/x"
+ noext = path
+ id = path
+ }
+
+ if false { // for debugging
+ if path != id {
+ fmt.Printf("%s -> %s\n", path, id)
+ }
+ }
+
+ // try extensions
+ for _, ext := range pkgExts {
+ filename = noext + ext
+ f, statErr := os.Stat(filename)
+ if statErr == nil && !f.IsDir() {
+ return filename, id, nil
+ }
+ if err == nil {
+ err = statErr
+ }
+ }
+
+notfound:
+ if err == nil {
+ return "", path, fmt.Errorf("can't find import: %q", path)
+ }
+ return "", path, fmt.Errorf("can't find import: %q: %w", path, err)
+}
+
+var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension
+
+var exportMap sync.Map // package dir → func() (string, error)
+
+// lookupGorootExport returns the location of the export data
+// (normally found in the build cache, but located in GOROOT/pkg
+// in prior Go releases) for the package located in pkgDir.
+//
+// (We use the package's directory instead of its import path
+// mainly to simplify handling of the packages in src/vendor
+// and cmd/vendor.)
+//
+// lookupGorootExport is only used in tests within x/tools.
+func lookupGorootExport(pkgDir string) (string, error) {
+ f, ok := exportMap.Load(pkgDir)
+ if !ok {
+ var (
+ listOnce sync.Once
+ exportPath string
+ err error
+ )
+ f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) {
+ listOnce.Do(func() {
+ cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir)
+ cmd.Dir = build.Default.GOROOT
+ cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT)
+ var output []byte
+ output, err = cmd.Output()
+ if err != nil {
+ if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+ err = errors.New(string(ee.Stderr))
+ }
+ return
+ }
+
+ exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
+ if len(exports) != 1 {
+ err = fmt.Errorf("go list reported %d exports; expected 1", len(exports))
+ return
+ }
+
+ exportPath = exports[0]
+ })
+
+ return exportPath, err
+ })
+ }
+
+ return f.(func() (string, error))()
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
new file mode 100644
index 0000000000..3dbd21d1b9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
@@ -0,0 +1,108 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go.
+
+// Package gcimporter provides various functions for reading
+// gc-generated object files that can be used to implement the
+// Importer interface defined by the Go 1.5 standard library package.
+//
+// The encoding is deterministic: if the encoder is applied twice to
+// the same types.Package data structure, both encodings are equal.
+// This property may be important to avoid spurious changes in
+// applications such as build systems.
+//
+// However, the encoder is not necessarily idempotent. Importing an
+// exported package may yield a types.Package that, while it
+// represents the same set of Go types as the original, may differ in
+// the details of its internal representation. Because of these
+// differences, re-encoding the imported package may yield a
+// different, but equally valid, encoding of the package.
+package gcimporter // import "golang.org/x/tools/internal/gcimporter"
+
+import (
+ "bufio"
+ "fmt"
+ "go/token"
+ "go/types"
+ "io"
+ "os"
+)
+
+const (
+ // Enable debug during development: it adds some additional checks, and
+ // prevents errors from being recovered.
+ debug = false
+
+ // If trace is set, debugging output is printed to std out.
+ trace = false
+)
+
+// Import imports a gc-generated package given its import path and srcDir, adds
+// the corresponding package object to the packages map, and returns the object.
+// The packages map must contain all packages already imported.
+//
+// Import is only used in tests.
+func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
+ var rc io.ReadCloser
+ var id string
+ if lookup != nil {
+ // With custom lookup specified, assume that caller has
+ // converted path to a canonical import path for use in the map.
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ id = path
+
+ // No need to re-import if the package was imported completely before.
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+ f, err := lookup(path)
+ if err != nil {
+ return nil, err
+ }
+ rc = f
+ } else {
+ var filename string
+ filename, id, err = FindPkg(path, srcDir)
+ if filename == "" {
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ return nil, err
+ }
+
+ // no need to re-import if the package was imported completely before
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("%s: %v", filename, err)
+ }
+ }()
+ rc = f
+ }
+ defer rc.Close()
+
+ buf := bufio.NewReader(rc)
+ data, err := ReadUnified(buf)
+ if err != nil {
+ err = fmt.Errorf("import %q: %v", path, err)
+ return
+ }
+
+ // unified: emitted by cmd/compile since go1.20.
+ _, pkg, err = UImportData(fset, packages, data, id)
+
+ return
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
new file mode 100644
index 0000000000..4a4357d2bd
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -0,0 +1,1595 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package export.
+//
+// The indexed export data format is an evolution of the previous
+// binary export data format. Its chief contribution is introducing an
+// index table, which allows efficient random access of individual
+// declarations and inline function bodies. In turn, this allows
+// avoiding unnecessary work for compilation units that import large
+// packages.
+//
+//
+// The top-level data format is structured as:
+//
+// Header struct {
+// Tag byte // 'i'
+// Version uvarint
+// StringSize uvarint
+// DataSize uvarint
+// }
+//
+// Strings [StringSize]byte
+// Data [DataSize]byte
+//
+// MainIndex []struct{
+// PkgPath stringOff
+// PkgName stringOff
+// PkgHeight uvarint
+//
+// Decls []struct{
+// Name stringOff
+// Offset declOff
+// }
+// }
+//
+// Fingerprint [8]byte
+//
+// uvarint means a uint64 written out using uvarint encoding.
+//
+// []T means a uvarint followed by that many T objects. In other
+// words:
+//
+// Len uvarint
+// Elems [Len]T
+//
+// stringOff means a uvarint that indicates an offset within the
+// Strings section. At that offset is another uvarint, followed by
+// that many bytes, which form the string value.
+//
+// declOff means a uvarint that indicates an offset within the Data
+// section where the associated declaration can be found.
+//
+//
+// There are five kinds of declarations, distinguished by their first
+// byte:
+//
+// type Var struct {
+// Tag byte // 'V'
+// Pos Pos
+// Type typeOff
+// }
+//
+// type Func struct {
+// Tag byte // 'F' or 'G'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'G'
+// Signature Signature
+// }
+//
+// type Const struct {
+// Tag byte // 'C'
+// Pos Pos
+// Value Value
+// }
+//
+// type Type struct {
+// Tag byte // 'T' or 'U'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'U'
+// Underlying typeOff
+//
+// Methods []struct{ // omitted if Underlying is an interface type
+// Pos Pos
+// Name stringOff
+// Recv Param
+// Signature Signature
+// }
+// }
+//
+// type Alias struct {
+// Tag byte // 'A' or 'B'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'B'
+// Type typeOff
+// }
+//
+// // "Automatic" declaration of each typeparam
+// type TypeParam struct {
+// Tag byte // 'P'
+// Pos Pos
+// Implicit bool
+// Constraint typeOff
+// }
+//
+// typeOff means a uvarint that either indicates a predeclared type,
+// or an offset into the Data section. If the uvarint is less than
+// predeclReserved, then it indicates the index into the predeclared
+// types list (see predeclared in bexport.go for order). Otherwise,
+// subtracting predeclReserved yields the offset of a type descriptor.
+//
+// Value means a type, kind, and type-specific value. See
+// (*exportWriter).value for details.
+//
+//
+// There are twelve kinds of type descriptors, distinguished by an itag:
+//
+// type DefinedType struct {
+// Tag itag // definedType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// type PointerType struct {
+// Tag itag // pointerType
+// Elem typeOff
+// }
+//
+// type SliceType struct {
+// Tag itag // sliceType
+// Elem typeOff
+// }
+//
+// type ArrayType struct {
+// Tag itag // arrayType
+// Len uint64
+// Elem typeOff
+// }
+//
+// type ChanType struct {
+// Tag itag // chanType
+// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+// Elem typeOff
+// }
+//
+// type MapType struct {
+// Tag itag // mapType
+// Key typeOff
+// Elem typeOff
+// }
+//
+// type FuncType struct {
+// Tag itag // signatureType
+// PkgPath stringOff
+// Signature Signature
+// }
+//
+// type StructType struct {
+// Tag itag // structType
+// PkgPath stringOff
+// Fields []struct {
+// Pos Pos
+// Name stringOff
+// Type typeOff
+// Embedded bool
+// Note stringOff
+// }
+// }
+//
+// type InterfaceType struct {
+// Tag itag // interfaceType
+// PkgPath stringOff
+// Embeddeds []struct {
+// Pos Pos
+// Type typeOff
+// }
+// Methods []struct {
+// Pos Pos
+// Name stringOff
+// Signature Signature
+// }
+// }
+//
+// // Reference to a type param declaration
+// type TypeParamType struct {
+// Tag itag // typeParamType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// // Instantiation of a generic type (like List[T2] or List[int])
+// type InstanceType struct {
+// Tag itag // instanceType
+// Pos pos
+// TypeArgs []typeOff
+// BaseType typeOff
+// }
+//
+// type UnionType struct {
+// Tag itag // interfaceType
+// Terms []struct {
+// tilde bool
+// Type typeOff
+// }
+// }
+//
+//
+//
+// type Signature struct {
+// Params []Param
+// Results []Param
+// Variadic bool // omitted if Results is empty
+// }
+//
+// type Param struct {
+// Pos Pos
+// Name stringOff
+// Type typOff
+// }
+//
+//
+// Pos encodes a file:line:column triple, incorporating a simple delta
+// encoding scheme within a data object. See exportWriter.pos for
+// details.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "math/big"
+ "reflect"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/internal/aliases"
+)
+
+// IExportShallow encodes "shallow" export data for the specified package.
+//
+// For types, we use "shallow" export data. Historically, the Go
+// compiler always produced a summary of the types for a given package
+// that included types from other packages that it indirectly
+// referenced: "deep" export data. This had the advantage that the
+// compiler (and analogous tools such as gopls) need only load one
+// file per direct import. However, it meant that the files tended to
+// get larger based on the level of the package in the import
+// graph. For example, higher-level packages in the kubernetes module
+// have over 1MB of "deep" export data, even when they have almost no
+// content of their own, merely because they mention a major type that
+// references many others. In pathological cases the export data was
+// 300x larger than the source for a package due to this quadratic
+// growth.
+//
+// "Shallow" export data means that the serialized types describe only
+// a single package. If those types mention types from other packages,
+// the type checker may need to request additional packages beyond
+// just the direct imports. Type information for the entire transitive
+// closure of imports is provided (lazily) by the DAG.
+//
+// No promises are made about the encoding other than that it can be decoded by
+// the same version of IExportShallow. If you plan to save export data in the
+// file system, be sure to include a cryptographic digest of the executable in
+// the key to avoid version skew.
+//
+// If the provided reportf func is non-nil, it is used for reporting
+// bugs (e.g. recovered panics) encountered during export, enabling us
+// to obtain via telemetry the stack that would otherwise be lost by
+// merely returning an error.
+func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) {
+ // In principle this operation can only fail if out.Write fails,
+ // but that's impossible for bytes.Buffer---and as a matter of
+ // fact iexportCommon doesn't even check for I/O errors.
+ // TODO(adonovan): handle I/O errors properly.
+ // TODO(adonovan): use byte slices throughout, avoiding copying.
+ const bundle, shallow = false, true
+ var out bytes.Buffer
+ err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, reportf)
+ return out.Bytes(), err
+}
+
+// IImportShallow decodes "shallow" types.Package data encoded by
+// [IExportShallow] in the same executable. This function cannot import data
+// from cmd/compile or gcexportdata.Write.
+//
+// The importer calls getPackages to obtain package symbols for all
+// packages mentioned in the export data, including the one being
+// decoded.
+//
+// If the provided reportf func is non-nil, it will be used for reporting bugs
+// encountered during import.
+// TODO(rfindley): remove reportf when we are confident enough in the new
+// objectpath encoding.
+func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) {
+ const bundle = false
+ const shallow = true
+ pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf)
+ if err != nil {
+ return nil, err
+ }
+ return pkgs[0], nil
+}
+
+// ReportFunc is the type of a function used to report formatted bugs.
+type ReportFunc = func(string, ...any)
+
+// Current bundled export format version. Increase with each format change.
+// 0: initial implementation
+const bundleVersion = 0
+
+// IExportData writes indexed export data for pkg to out.
+//
+// If no file set is provided, position info will be missing.
+// The package path of the top-level package will not be recorded,
+// so that calls to IImportData can override with a provided package path.
+func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
+ const bundle, shallow = false, false
+ return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, nil)
+}
+
+// IExportBundle writes an indexed export bundle for pkgs to out.
+func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
+ const bundle, shallow = true, false
+ return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs, nil)
+}
+
+func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package, reportf ReportFunc) (err error) {
+ if !debug {
+ defer func() {
+ if e := recover(); e != nil {
+ // Report the stack via telemetry (see #71067).
+ if reportf != nil {
+ reportf("panic in exporter")
+ }
+ if ierr, ok := e.(internalError); ok {
+ // internalError usually means we exported a
+ // bad go/types data structure: a violation
+ // of an implicit precondition of Export.
+ err = ierr
+ return
+ }
+ // Not an internal error; panic again.
+ panic(e)
+ }
+ }()
+ }
+
+ p := iexporter{
+ fset: fset,
+ version: version,
+ shallow: shallow,
+ allPkgs: map[*types.Package]bool{},
+ stringIndex: map[string]uint64{},
+ declIndex: map[types.Object]uint64{},
+ tparamNames: map[types.Object]string{},
+ typIndex: map[types.Type]uint64{},
+ }
+ if !bundle {
+ p.localpkg = pkgs[0]
+ }
+
+ for i, pt := range predeclared() {
+ p.typIndex[pt] = uint64(i)
+ }
+ if len(p.typIndex) > predeclReserved {
+ panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
+ }
+
+ // Initialize work queue with exported declarations.
+ for _, pkg := range pkgs {
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ if token.IsExported(name) {
+ p.pushDecl(scope.Lookup(name))
+ }
+ }
+
+ if bundle {
+ // Ensure pkg and its imports are included in the index.
+ p.allPkgs[pkg] = true
+ for _, imp := range pkg.Imports() {
+ p.allPkgs[imp] = true
+ }
+ }
+ }
+
+ // Loop until no more work.
+ for !p.declTodo.empty() {
+ p.doDecl(p.declTodo.popHead())
+ }
+
+ // Produce index of offset of each file record in files.
+ var files intWriter
+ var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i
+ if p.shallow {
+ fileOffset = make([]uint64, len(p.fileInfos))
+ for i, info := range p.fileInfos {
+ fileOffset[i] = uint64(files.Len())
+ p.encodeFile(&files, info.file, info.needed)
+ }
+ }
+
+ // Append indices to data0 section.
+ dataLen := uint64(p.data0.Len())
+ w := p.newWriter()
+ w.writeIndex(p.declIndex)
+
+ if bundle {
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.pkg(pkg)
+ imps := pkg.Imports()
+ w.uint64(uint64(len(imps)))
+ for _, imp := range imps {
+ w.pkg(imp)
+ }
+ }
+ }
+ w.flush()
+
+ // Assemble header.
+ var hdr intWriter
+ if bundle {
+ hdr.uint64(bundleVersion)
+ }
+ hdr.uint64(uint64(p.version))
+ hdr.uint64(uint64(p.strings.Len()))
+ if p.shallow {
+ hdr.uint64(uint64(files.Len()))
+ hdr.uint64(uint64(len(fileOffset)))
+ for _, offset := range fileOffset {
+ hdr.uint64(offset)
+ }
+ }
+ hdr.uint64(dataLen)
+
+ // Flush output.
+ io.Copy(out, &hdr)
+ io.Copy(out, &p.strings)
+ if p.shallow {
+ io.Copy(out, &files)
+ }
+ io.Copy(out, &p.data0)
+
+ return nil
+}
+
+// encodeFile writes to w a representation of the file sufficient to
+// faithfully restore position information about all needed offsets.
+// Mutates the needed array.
+func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) {
+ _ = needed[0] // precondition: needed is non-empty
+
+ w.uint64(p.stringOff(file.Name()))
+
+ size := uint64(file.Size())
+ w.uint64(size)
+
+ // Sort the set of needed offsets. Duplicates are harmless.
+ slices.Sort(needed)
+
+ lines := file.Lines() // byte offset of each line start
+ w.uint64(uint64(len(lines)))
+
+ // Rather than record the entire array of line start offsets,
+ // we save only a sparse list of (index, offset) pairs for
+ // the start of each line that contains a needed position.
+ var sparse [][2]int // (index, offset) pairs
+outer:
+ for i, lineStart := range lines {
+ lineEnd := size
+ if i < len(lines)-1 {
+ lineEnd = uint64(lines[i+1])
+ }
+ // Does this line contain a needed offset?
+ if needed[0] < lineEnd {
+ sparse = append(sparse, [2]int{i, lineStart})
+ for needed[0] < lineEnd {
+ needed = needed[1:]
+ if len(needed) == 0 {
+ break outer
+ }
+ }
+ }
+ }
+
+ // Delta-encode the columns.
+ w.uint64(uint64(len(sparse)))
+ var prev [2]int
+ for _, pair := range sparse {
+ w.uint64(uint64(pair[0] - prev[0]))
+ w.uint64(uint64(pair[1] - prev[1]))
+ prev = pair
+ }
+}
+
+// writeIndex writes out an object index. mainIndex indicates whether
+// we're writing out the main index, which is also read by
+// non-compiler tools and includes a complete package description
+// (i.e., name and height).
+func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
+ type pkgObj struct {
+ obj types.Object
+ name string // qualified name; differs from obj.Name for type params
+ }
+ // Build a map from packages to objects from that package.
+ pkgObjs := map[*types.Package][]pkgObj{}
+
+ // For the main index, make sure to include every package that
+ // we reference, even if we're not exporting (or reexporting)
+ // any symbols from it.
+ if w.p.localpkg != nil {
+ pkgObjs[w.p.localpkg] = nil
+ }
+ for pkg := range w.p.allPkgs {
+ pkgObjs[pkg] = nil
+ }
+
+ for obj := range index {
+ name := w.p.exportName(obj)
+ pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name})
+ }
+
+ var pkgs []*types.Package
+ for pkg, objs := range pkgObjs {
+ pkgs = append(pkgs, pkg)
+
+ sort.Slice(objs, func(i, j int) bool {
+ return objs[i].name < objs[j].name
+ })
+ }
+
+ sort.Slice(pkgs, func(i, j int) bool {
+ return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])
+ })
+
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.string(w.exportPath(pkg))
+ w.string(pkg.Name())
+ w.uint64(uint64(0)) // package height is not needed for go/types
+
+ objs := pkgObjs[pkg]
+ w.uint64(uint64(len(objs)))
+ for _, obj := range objs {
+ w.string(obj.name)
+ w.uint64(index[obj.obj])
+ }
+ }
+}
+
+// exportName returns the 'exported' name of an object. It differs from
+// obj.Name() only for type parameters (see tparamExportName for details).
+func (p *iexporter) exportName(obj types.Object) (res string) {
+ if name := p.tparamNames[obj]; name != "" {
+ return name
+ }
+ return obj.Name()
+}
+
// iexporter holds the global state of an indexed export: the
// accumulating string, file, and declaration sections, plus the
// indices used to deduplicate entries across those sections.
type iexporter struct {
	fset *token.FileSet
	version int // export data format version being written

	shallow bool // don't put types from other packages in the index
	objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated
	localpkg *types.Package // (nil in bundle mode)

	// allPkgs tracks all packages that have been referenced by
	// the export data, so we can ensure to include them in the
	// main index.
	allPkgs map[*types.Package]bool

	// declTodo is the FIFO queue of declarations still to be written.
	declTodo objQueue

	// strings accumulates the string section; stringIndex maps each
	// interned string to its offset there.
	strings intWriter
	stringIndex map[string]uint64

	// In shallow mode, object positions are encoded as (file, offset).
	// Each file is recorded as a line-number table.
	// Only the lines of needed positions are saved faithfully.
	fileInfo map[*token.File]uint64 // value is index in fileInfos
	fileInfos []*filePositions

	// data0 accumulates the declaration/type section; the maps below
	// record the offsets of declarations and types already written.
	data0 intWriter
	declIndex map[types.Object]uint64
	tparamNames map[types.Object]string // typeparam->exported name
	typIndex map[types.Type]uint64

	indent int // for tracing support
}

// filePositions records, for a single token.File, the byte offsets
// that position encoding actually needs (shallow mode only).
type filePositions struct {
	file *token.File
	needed []uint64 // unordered list of needed file offsets
}
+
+func (p *iexporter) trace(format string, args ...any) {
+ if !trace {
+ // Call sites should also be guarded, but having this check here allows
+ // easily enabling/disabling debug trace statements.
+ return
+ }
+ fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
+}
+
+// objectpathEncoder returns the lazily allocated objectpath.Encoder to use
+// when encoding objects in other packages during shallow export.
+//
+// Using a shared Encoder amortizes some of cost of objectpath search.
+func (p *iexporter) objectpathEncoder() *objectpath.Encoder {
+ if p.objEncoder == nil {
+ p.objEncoder = new(objectpath.Encoder)
+ }
+ return p.objEncoder
+}
+
+// stringOff returns the offset of s within the string section.
+// If not already present, it's added to the end.
+func (p *iexporter) stringOff(s string) uint64 {
+ off, ok := p.stringIndex[s]
+ if !ok {
+ off = uint64(p.strings.Len())
+ p.stringIndex[s] = off
+
+ p.strings.uint64(uint64(len(s)))
+ p.strings.WriteString(s)
+ }
+ return off
+}
+
// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it.
func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) {
	index, ok := p.fileInfo[file]
	if !ok {
		// len of a nil map is 0, so index is correct even before the
		// lazy initialization just below.
		index = uint64(len(p.fileInfo))
		p.fileInfos = append(p.fileInfos, &filePositions{file: file})
		if p.fileInfo == nil {
			p.fileInfo = make(map[*token.File]uint64)
		}
		p.fileInfo[file] = index
	}
	// Record each needed offset.
	info := p.fileInfos[index]
	offset := uint64(file.Offset(pos))
	info.needed = append(info.needed, offset)

	return index, offset
}
+
// pushDecl adds n to the declaration work queue, if not already present.
func (p *iexporter) pushDecl(obj types.Object) {
	// Package unsafe is known to the compiler and predeclared.
	// Caller should not ask us to do export it.
	if obj.Pkg() == types.Unsafe {
		panic("cannot export package unsafe")
	}

	// Shallow export data: don't index decls from other packages.
	if p.shallow && obj.Pkg() != p.localpkg {
		return
	}

	if _, ok := p.declIndex[obj]; ok {
		return
	}

	// The sentinel value is replaced with the real offset when doDecl
	// flushes the declaration.
	p.declIndex[obj] = ^uint64(0) // mark obj present in work queue
	p.declTodo.pushTail(obj)
}
+
// exportWriter handles writing out individual data section chunks.
type exportWriter struct {
	p *iexporter

	data intWriter
	// prevFile/prevLine/prevColumn hold the delta-encoding state used
	// by posV0/posV1; deltas are valid only within a single chunk.
	prevFile string
	prevLine int64
	prevColumn int64
}
+
+func (w *exportWriter) exportPath(pkg *types.Package) string {
+ if pkg == w.p.localpkg {
+ return ""
+ }
+ return pkg.Path()
+}
+
// doDecl encodes the declaration of obj (a package-level var, func,
// const, or type name) into a fresh chunk and records the chunk's
// offset in p.declIndex, replacing the queue sentinel.
func (p *iexporter) doDecl(obj types.Object) {
	if trace {
		p.trace("exporting decl %v (%T)", obj, obj)
		p.indent++
		defer func() {
			p.indent--
			p.trace("=> %s", obj)
		}()
	}
	w := p.newWriter()

	switch obj := obj.(type) {
	case *types.Var:
		w.tag(varTag)
		w.pos(obj.Pos())
		w.typ(obj.Type(), obj.Pkg())

	case *types.Func:
		sig, _ := obj.Type().(*types.Signature)
		if sig.Recv() != nil {
			// We shouldn't see methods in the package scope,
			// but the type checker may repair "func () F() {}"
			// to "func (Invalid) F()" and then treat it like "func F()",
			// so allow that. See golang/go#57729.
			if sig.Recv().Type() != types.Typ[types.Invalid] {
				panic(internalErrorf("unexpected method: %v", sig))
			}
		}

		// Function.
		if sig.TypeParams().Len() == 0 {
			w.tag(funcTag)
		} else {
			w.tag(genericFuncTag)
		}
		w.pos(obj.Pos())
		// The tparam list of the function type is the declaration of the type
		// params. So, write out the type params right now. Then those type params
		// will be referenced via their type offset (via typOff) in all other
		// places in the signature and function where they are used.
		//
		// While importing the type parameters, tparamList computes and records
		// their export name, so that it can be later used when writing the index.
		if tparams := sig.TypeParams(); tparams.Len() > 0 {
			w.tparamList(obj.Name(), tparams, obj.Pkg())
		}
		w.signature(sig)

	case *types.Const:
		w.tag(constTag)
		w.pos(obj.Pos())
		w.value(obj.Type(), obj.Val())

	case *types.TypeName:
		t := obj.Type()

		if tparam, ok := types.Unalias(t).(*types.TypeParam); ok {
			w.tag(typeParamTag)
			w.pos(obj.Pos())
			constraint := tparam.Constraint()
			if p.version >= iexportVersionGo1_18 {
				implicit := false
				if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil {
					implicit = iface.IsImplicit()
				}
				w.bool(implicit)
			}
			w.typ(constraint, obj.Pkg())
			break
		}

		if obj.IsAlias() {
			alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled

			var tparams *types.TypeParamList
			if materialized {
				tparams = aliases.TypeParams(alias)
			}
			if tparams.Len() == 0 {
				w.tag(aliasTag)
			} else {
				w.tag(genericAliasTag)
			}
			w.pos(obj.Pos())
			if tparams.Len() > 0 {
				w.tparamList(obj.Name(), tparams, obj.Pkg())
			}
			if materialized {
				// Preserve materialized aliases,
				// even of non-exported types.
				t = aliases.Rhs(alias)
			}
			w.typ(t, obj.Pkg())
			break
		}

		// Defined type.
		named, ok := t.(*types.Named)
		if !ok {
			panic(internalErrorf("%s is not a defined type", t))
		}

		if named.TypeParams().Len() == 0 {
			w.tag(typeTag)
		} else {
			w.tag(genericTypeTag)
		}
		w.pos(obj.Pos())

		if named.TypeParams().Len() > 0 {
			// While importing the type parameters, tparamList computes and records
			// their export name, so that it can be later used when writing the index.
			w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg())
		}

		underlying := named.Underlying()
		w.typ(underlying, obj.Pkg())

		// Methods are written only for non-interface defined types.
		if types.IsInterface(t) {
			break
		}

		n := named.NumMethods()
		w.uint64(uint64(n))
		for i := range n {
			m := named.Method(i)
			w.pos(m.Pos())
			w.string(m.Name())
			sig, _ := m.Type().(*types.Signature)

			// Receiver type parameters are type arguments of the receiver type, so
			// their name must be qualified before exporting recv.
			if rparams := sig.RecvTypeParams(); rparams.Len() > 0 {
				prefix := obj.Name() + "." + m.Name()
				for i := 0; i < rparams.Len(); i++ {
					rparam := rparams.At(i)
					name := tparamExportName(prefix, rparam)
					w.p.tparamNames[rparam.Obj()] = name
				}
			}
			w.param(sig.Recv())
			w.signature(sig)
		}

	default:
		panic(internalErrorf("unexpected object: %v", obj))
	}

	p.declIndex[obj] = w.flush()
}
+
// tag writes the single-byte object tag that begins each declaration.
func (w *exportWriter) tag(tag byte) {
	w.data.WriteByte(tag)
}
+
+func (w *exportWriter) pos(pos token.Pos) {
+ if w.p.shallow {
+ w.posV2(pos)
+ } else if w.p.version >= iexportVersionPosCol {
+ w.posV1(pos)
+ } else {
+ w.posV0(pos)
+ }
+}
+
// posV2 encoding (used only in shallow mode) records positions as
// (file, offset), where file is the index in the token.File table
// (which records the file name and newline offsets) and offset is a
// byte offset. It effectively ignores //line directives.
func (w *exportWriter) posV2(pos token.Pos) {
	if pos == token.NoPos {
		w.uint64(0)
		return
	}
	file := w.p.fset.File(pos) // fset must be non-nil
	// File indices are written 1-based so that 0 can encode NoPos.
	index, offset := w.p.fileIndexAndOffset(file, pos)
	w.uint64(1 + index)
	w.uint64(offset)
}
+
// posV1 delta-encodes (file, line, column) against the previous
// position written by this chunk. The low bit of deltaColumn signals
// that deltaLine follows; the low bit of deltaLine signals that a new
// file name follows.
func (w *exportWriter) posV1(pos token.Pos) {
	if w.p.fset == nil {
		w.int64(0)
		return
	}

	p := w.p.fset.Position(pos)
	file := p.Filename
	line := int64(p.Line)
	column := int64(p.Column)

	// Shift left one bit to reserve the low bit as a "more follows" flag.
	deltaColumn := (column - w.prevColumn) << 1
	deltaLine := (line - w.prevLine) << 1

	if file != w.prevFile {
		deltaLine |= 1
	}
	if deltaLine != 0 {
		deltaColumn |= 1
	}

	w.int64(deltaColumn)
	if deltaColumn&1 != 0 {
		w.int64(deltaLine)
		if deltaLine&1 != 0 {
			w.string(file)
		}
	}

	w.prevFile = file
	w.prevLine = line
	w.prevColumn = column
}
+
// posV0 encodes just (file, line): lines are delta-encoded while the
// file is unchanged, and the sentinel deltaNewFile marks a file switch.
func (w *exportWriter) posV0(pos token.Pos) {
	if w.p.fset == nil {
		w.int64(0)
		return
	}

	p := w.p.fset.Position(pos)
	file := p.Filename
	line := int64(p.Line)

	// When file is the same as the last position (common case),
	// we can save a few bytes by delta encoding just the line
	// number.
	//
	// Note: Because data objects may be read out of order (or not
	// at all), we can only apply delta encoding within a single
	// object. This is handled implicitly by tracking prevFile and
	// prevLine as fields of exportWriter.

	if file == w.prevFile {
		delta := line - w.prevLine
		w.int64(delta)
		if delta == deltaNewFile {
			// Disambiguate a true delta that collides with the sentinel.
			w.int64(-1)
		}
	} else {
		w.int64(deltaNewFile)
		w.int64(line) // line >= 0
		w.string(file)
		w.prevFile = file
	}
	w.prevLine = line
}
+
+func (w *exportWriter) pkg(pkg *types.Package) {
+ // Ensure any referenced packages are declared in the main index.
+ w.p.allPkgs[pkg] = true
+
+ w.string(w.exportPath(pkg))
+}
+
+func (w *exportWriter) qualifiedType(obj *types.TypeName) {
+ name := w.p.exportName(obj)
+
+ // Ensure any referenced declarations are written out too.
+ w.p.pushDecl(obj)
+ w.string(name)
+ w.pkg(obj.Pkg())
+}
+
// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass
// it in explicitly into signatures and structs that may use it for
// constructing fields.
// typ writes a reference to type t as its offset in the type section.
func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
	w.data.uint64(w.p.typOff(t, pkg))
}
+
+func (p *iexporter) newWriter() *exportWriter {
+ return &exportWriter{p: p}
+}
+
// flush appends this chunk's bytes to the exporter's main data section
// and returns the offset at which the chunk begins.
func (w *exportWriter) flush() uint64 {
	off := uint64(w.p.data0.Len())
	// Copying between in-memory buffers cannot fail, so the error is ignored.
	io.Copy(&w.p.data0, &w.data)
	return off
}
+
+func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
+ off, ok := p.typIndex[t]
+ if !ok {
+ w := p.newWriter()
+ w.doTyp(t, pkg)
+ off = predeclReserved + w.flush()
+ p.typIndex[t] = off
+ }
+ return off
+}
+
// startType writes the itag that begins every type encoding.
func (w *exportWriter) startType(k itag) {
	w.data.uint64(uint64(k))
}
+
// doTyp writes the encoding of type t. Named types, aliases, and type
// parameters are written as qualified references (queueing their
// declarations); composite types are written structurally.
func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
	if trace {
		w.p.trace("exporting type %s (%T)", t, t)
		w.p.indent++
		defer func() {
			w.p.indent--
			w.p.trace("=> %s", t)
		}()
	}
	switch t := t.(type) {
	case *types.Alias:
		if targs := aliases.TypeArgs(t); targs.Len() > 0 {
			// Instantiated alias: write the type arguments and the origin.
			w.startType(instanceType)
			w.pos(t.Obj().Pos())
			w.typeList(targs, pkg)
			w.typ(aliases.Origin(t), pkg)
			return
		}
		w.startType(aliasType)
		w.qualifiedType(t.Obj())

	case *types.Named:
		if targs := t.TypeArgs(); targs.Len() > 0 {
			w.startType(instanceType)
			// TODO(rfindley): investigate if this position is correct, and if it
			// matters.
			w.pos(t.Obj().Pos())
			w.typeList(targs, pkg)
			w.typ(t.Origin(), pkg)
			return
		}
		w.startType(definedType)
		w.qualifiedType(t.Obj())

	case *types.TypeParam:
		w.startType(typeParamType)
		w.qualifiedType(t.Obj())

	case *types.Pointer:
		w.startType(pointerType)
		w.typ(t.Elem(), pkg)

	case *types.Slice:
		w.startType(sliceType)
		w.typ(t.Elem(), pkg)

	case *types.Array:
		w.startType(arrayType)
		w.uint64(uint64(t.Len()))
		w.typ(t.Elem(), pkg)

	case *types.Chan:
		w.startType(chanType)
		// 1 RecvOnly; 2 SendOnly; 3 SendRecv
		var dir uint64
		switch t.Dir() {
		case types.RecvOnly:
			dir = 1
		case types.SendOnly:
			dir = 2
		case types.SendRecv:
			dir = 3
		}
		w.uint64(dir)
		w.typ(t.Elem(), pkg)

	case *types.Map:
		w.startType(mapType)
		w.typ(t.Key(), pkg)
		w.typ(t.Elem(), pkg)

	case *types.Signature:
		w.startType(signatureType)
		w.pkg(pkg)
		w.signature(t)

	case *types.Struct:
		w.startType(structType)
		n := t.NumFields()
		// Even for struct{} we must emit some qualifying package, because that's
		// what the compiler does, and thus that's what the importer expects.
		fieldPkg := pkg
		if n > 0 {
			fieldPkg = t.Field(0).Pkg()
		}
		if fieldPkg == nil {
			// TODO(rfindley): improve this very hacky logic.
			//
			// The importer expects a package to be set for all struct types, even
			// those with no fields. A better encoding might be to set NumFields
			// before pkg. setPkg panics with a nil package, which may be possible
			// to reach with invalid packages (and perhaps valid packages, too?), so
			// (arbitrarily) set the localpkg if available.
			//
			// Alternatively, we may be able to simply guarantee that pkg != nil, by
			// reconsidering the encoding of constant values.
			if w.p.shallow {
				fieldPkg = w.p.localpkg
			} else {
				panic(internalErrorf("no package to set for empty struct"))
			}
		}
		w.pkg(fieldPkg)
		w.uint64(uint64(n))

		for i := range n {
			f := t.Field(i)
			if w.p.shallow {
				w.objectPath(f)
			}
			w.pos(f.Pos())
			w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg
			w.typ(f.Type(), fieldPkg)
			w.bool(f.Anonymous())
			w.string(t.Tag(i)) // note (or tag)
		}

	case *types.Interface:
		w.startType(interfaceType)
		w.pkg(pkg)

		n := t.NumEmbeddeds()
		w.uint64(uint64(n))
		for i := 0; i < n; i++ {
			ft := t.EmbeddedType(i)
			tPkg := pkg
			if named, _ := types.Unalias(ft).(*types.Named); named != nil {
				w.pos(named.Obj().Pos())
			} else {
				w.pos(token.NoPos)
			}
			w.typ(ft, tPkg)
		}

		// See comment for struct fields. In shallow mode we change the encoding
		// for interface methods that are promoted from other packages.

		n = t.NumExplicitMethods()
		w.uint64(uint64(n))
		for i := 0; i < n; i++ {
			m := t.ExplicitMethod(i)
			if w.p.shallow {
				w.objectPath(m)
			}
			w.pos(m.Pos())
			w.string(m.Name())
			sig, _ := m.Type().(*types.Signature)
			w.signature(sig)
		}

	case *types.Union:
		w.startType(unionType)
		nt := t.Len()
		w.uint64(uint64(nt))
		for i := range nt {
			term := t.Term(i)
			w.bool(term.Tilde())
			w.typ(term.Type(), pkg)
		}

	default:
		panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
	}
}
+
// objectPath writes the package and objectPath to use to look up obj in a
// different package, when encoding in "shallow" mode.
//
// When doing a shallow import, the importer creates only the local package,
// and requests package symbols for dependencies from the client.
// However, certain types defined in the local package may hold objects defined
// (perhaps deeply) within another package.
//
// For example, consider the following:
//
//	package a
//	func F() chan * map[string] struct { X int }
//
//	package b
//	import "a"
//	var B = a.F()
//
// In this example, the type of b.B holds fields defined in package a.
// In order to have the correct canonical objects for the field defined in the
// type of B, they are encoded as objectPaths and later looked up in the
// importer. The same problem applies to interface methods.
func (w *exportWriter) objectPath(obj types.Object) {
	if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg {
		// obj.Pkg() may be nil for the builtin error.Error.
		// In this case, or if obj is declared in the local package, no need to
		// encode.
		w.string("")
		return
	}
	objectPath, err := w.p.objectpathEncoder().For(obj)
	if err != nil {
		// Fall back to the empty string, which will cause the importer to create a
		// new object, which matches earlier behavior. Creating a new object is
		// sufficient for many purposes (such as type checking), but causes certain
		// references algorithms to fail (golang/go#60819). However, we didn't
		// notice this problem during months of gopls@v0.12.0 testing.
		//
		// TODO(golang/go#61674): this workaround is insufficient, as in the case
		// where the field forwarded from an instantiated type that may not appear
		// in the export data of the original package:
		//
		//  // package a
		//  type A[P any] struct{ F P }
		//
		//  // package b
		//  type B a.A[int]
		//
		// We need to update references algorithms not to depend on this
		// de-duplication, at which point we may want to simply remove the
		// workaround here.
		w.string("")
		return
	}
	// Non-empty path: the importer resolves obj via this path in obj.Pkg().
	w.string(string(objectPath))
	w.pkg(obj.Pkg())
}
+
// signature writes the parameter and result tuples of sig, followed by
// the variadic flag — which is emitted only when there is at least one
// parameter, matching the importer's expectation.
func (w *exportWriter) signature(sig *types.Signature) {
	w.paramList(sig.Params())
	w.paramList(sig.Results())
	if sig.Params().Len() > 0 {
		w.bool(sig.Variadic())
	}
}
+
+func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) {
+ w.uint64(uint64(ts.Len()))
+ for i := 0; i < ts.Len(); i++ {
+ w.typ(ts.At(i), pkg)
+ }
+}
+
+func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) {
+ ll := uint64(list.Len())
+ w.uint64(ll)
+ for i := 0; i < list.Len(); i++ {
+ tparam := list.At(i)
+ // Set the type parameter exportName before exporting its type.
+ exportName := tparamExportName(prefix, tparam)
+ w.p.tparamNames[tparam.Obj()] = exportName
+ w.typ(list.At(i), pkg)
+ }
+}
+
+const blankMarker = "$"
+
+// tparamExportName returns the 'exported' name of a type parameter, which
+// differs from its actual object name: it is prefixed with a qualifier, and
+// blank type parameter names are disambiguated by their index in the type
+// parameter list.
+func tparamExportName(prefix string, tparam *types.TypeParam) string {
+ assert(prefix != "")
+ name := tparam.Obj().Name()
+ if name == "_" {
+ name = blankMarker + strconv.Itoa(tparam.Index())
+ }
+ return prefix + "." + name
+}
+
+// tparamName returns the real name of a type parameter, after stripping its
+// qualifying prefix and reverting blank-name encoding. See tparamExportName
+// for details.
+func tparamName(exportName string) string {
+ // Remove the "path" from the type param name that makes it unique.
+ ix := strings.LastIndex(exportName, ".")
+ if ix < 0 {
+ errorf("malformed type parameter export name %s: missing prefix", exportName)
+ }
+ name := exportName[ix+1:]
+ if strings.HasPrefix(name, blankMarker) {
+ return "_"
+ }
+ return name
+}
+
+func (w *exportWriter) paramList(tup *types.Tuple) {
+ n := tup.Len()
+ w.uint64(uint64(n))
+ for i := range n {
+ w.param(tup.At(i))
+ }
+}
+
// param writes the position, local name, and type of a single
// parameter (or receiver) object.
func (w *exportWriter) param(obj types.Object) {
	w.pos(obj.Pos())
	w.localIdent(obj)
	w.typ(obj.Type(), obj.Pkg())
}
+
// value encodes the constant value v of type typ: a type reference, the
// constant kind (in Go 1.18+ formats), then a kind-specific payload.
func (w *exportWriter) value(typ types.Type, v constant.Value) {
	w.typ(typ, nil)
	if w.p.version >= iexportVersionGo1_18 {
		w.int64(int64(v.Kind()))
	}

	if v.Kind() == constant.Unknown {
		// golang/go#60605: treat unknown constant values as if they have invalid type
		//
		// This loses some fidelity over the package type-checked from source, but that
		// is acceptable.
		//
		// TODO(rfindley): we should switch on the recorded constant kind rather
		// than the constant type
		return
	}

	switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
	case types.IsBoolean:
		w.bool(constant.BoolVal(v))
	case types.IsInteger:
		var i big.Int
		if i64, exact := constant.Int64Val(v); exact {
			i.SetInt64(i64)
		} else if ui64, exact := constant.Uint64Val(v); exact {
			i.SetUint64(ui64)
		} else {
			i.SetString(v.ExactString(), 10)
		}
		w.mpint(&i, typ)
	case types.IsFloat:
		f := constantToFloat(v)
		w.mpfloat(f, typ)
	case types.IsComplex:
		// Complex values are written as (real, imaginary) float pairs.
		w.mpfloat(constantToFloat(constant.Real(v)), typ)
		w.mpfloat(constantToFloat(constant.Imag(v)), typ)
	case types.IsString:
		w.string(constant.StringVal(v))
	default:
		if b.Kind() == types.Invalid {
			// package contains type errors
			break
		}
		panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying()))
	}
}
+
// constantToFloat converts a constant.Value with kind constant.Float to a
// big.Float.
func constantToFloat(x constant.Value) *big.Float {
	x = constant.ToFloat(x)
	// Use the same floating-point precision (512) as cmd/compile
	// (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
	const mpprec = 512
	var f big.Float
	f.SetPrec(mpprec)
	if v, exact := constant.Float64Val(x); exact {
		// float64
		f.SetFloat64(v)
	} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
		// Rational: compute numerator/denominator at full precision.
		// TODO(gri): add big.Rat accessor to constant.Value.
		n := valueToRat(num)
		d := valueToRat(denom)
		f.SetRat(n.Quo(n, d))
	} else {
		// Value too large to represent as a fraction => inaccessible.
		// TODO(gri): add big.Float accessor to constant.Value.
		_, ok := f.SetString(x.ExactString())
		assert(ok)
	}
	return &f
}
+
+func valueToRat(x constant.Value) *big.Rat {
+ // Convert little-endian to big-endian.
+ // I can't believe this is necessary.
+ bytes := constant.Bytes(x)
+ for i := 0; i < len(bytes)/2; i++ {
+ bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
+ }
+ return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
+}
+
// mpint exports a multi-precision integer.
//
// For unsigned types, small values are written out as a single
// byte. Larger values are written out as a length-prefixed big-endian
// byte string, where the length prefix is encoded as its complement.
// For example, bytes 0, 1, and 2 directly represent the integer
// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-,
// 2-, and 3-byte big-endian string follow.
//
// Encoding for signed types use the same general approach as for
// unsigned types, except small values use zig-zag encoding and the
// bottom bit of length prefix byte for large values is reserved as a
// sign bit.
//
// The exact boundary between small and large encodings varies
// according to the maximum number of bytes needed to encode a value
// of type typ. As a special case, 8-bit types are always encoded as a
// single byte.
//
// TODO(mdempsky): Is this level of complexity really worthwhile?
func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
	basic, ok := typ.Underlying().(*types.Basic)
	if !ok {
		panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
	}

	signed, maxBytes := intSize(basic)

	negative := x.Sign() < 0
	if !signed && negative {
		panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
	}

	b := x.Bytes()
	if len(b) > 0 && b[0] == 0 {
		panic(internalErrorf("leading zeros"))
	}
	if uint(len(b)) > maxBytes {
		panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
	}

	// maxSmall is the first byte value reserved for length prefixes;
	// values below it are encoded directly in a single byte.
	maxSmall := 256 - maxBytes
	if signed {
		maxSmall = 256 - 2*maxBytes
	}
	if maxBytes == 1 {
		maxSmall = 256
	}

	// Check if x can use small value encoding.
	if len(b) <= 1 {
		var ux uint
		if len(b) == 1 {
			ux = uint(b[0])
		}
		if signed {
			// Zig-zag encode: small magnitudes of either sign stay small.
			ux <<= 1
			if negative {
				ux--
			}
		}
		if ux < maxSmall {
			w.data.WriteByte(byte(ux))
			return
		}
	}

	// Large encoding: the prefix byte is the complement of the length,
	// with the low bit carrying the sign for signed types.
	n := 256 - uint(len(b))
	if signed {
		n = 256 - 2*uint(len(b))
		if negative {
			n |= 1
		}
	}
	if n < maxSmall || n >= 256 {
		panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
	}

	w.data.WriteByte(byte(n))
	w.data.Write(b)
}
+
// mpfloat exports a multi-precision floating point number.
//
// The number's value is decomposed into mantissa × 2**exponent, where
// mantissa is an integer. The value is written out as mantissa (as a
// multi-precision integer) and then the exponent, except exponent is
// omitted if mantissa is zero.
func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
	if f.IsInf() {
		panic("infinite constant")
	}

	// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
	var mant big.Float
	exp := int64(f.MantExp(&mant))

	// Scale so that mant is an integer.
	prec := mant.MinPrec()
	mant.SetMantExp(&mant, int(prec))
	exp -= int64(prec)

	manti, acc := mant.Int(nil)
	if acc != big.Exact {
		panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
	}
	w.mpint(manti, typ)
	// A zero mantissa means the value is exactly zero; no exponent needed.
	if manti.Sign() != 0 {
		w.int64(exp)
	}
}
+
+func (w *exportWriter) bool(b bool) bool {
+ var x uint64
+ if b {
+ x = 1
+ }
+ w.uint64(x)
+ return b
+}
+
// Convenience wrappers: write varint-encoded values and interned
// string offsets into this chunk's data buffer.
func (w *exportWriter) int64(x int64) { w.data.int64(x) }
func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
+
+func (w *exportWriter) localIdent(obj types.Object) {
+ // Anonymous parameters.
+ if obj == nil {
+ w.string("")
+ return
+ }
+
+ name := obj.Name()
+ if name == "_" {
+ w.string("_")
+ return
+ }
+
+ w.string(name)
+}
+
+type intWriter struct {
+ bytes.Buffer
+}
+
+func (w *intWriter) int64(x int64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutVarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
+func (w *intWriter) uint64(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
// assert panics when cond is false; it guards internal invariants.
func assert(cond bool) {
	if cond {
		return
	}
	panic("internal error: assertion failed")
}
+
// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.

// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
// a ready-to-use empty queue.
type objQueue struct {
	// ring is a circular buffer; head and tail are monotonically
	// increasing counters, reduced modulo len(ring) on access.
	ring []types.Object
	head, tail int
}

// empty returns true if q contains no Nodes.
func (q *objQueue) empty() bool {
	return q.head == q.tail
}

// pushTail appends n to the tail of the queue.
func (q *objQueue) pushTail(obj types.Object) {
	if len(q.ring) == 0 {
		q.ring = make([]types.Object, 16)
	} else if q.head+len(q.ring) == q.tail {
		// Grow the ring.
		nring := make([]types.Object, len(q.ring)*2)
		// Copy the old elements.
		part := q.ring[q.head%len(q.ring):]
		if q.tail-q.head <= len(part) {
			// Contiguous: all elements fit in the tail portion.
			part = part[:q.tail-q.head]
			copy(nring, part)
		} else {
			// Wrapped: copy the tail portion, then the wrapped prefix.
			pos := copy(nring, part)
			copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
		}
		q.ring, q.head, q.tail = nring, 0, q.tail-q.head
	}

	q.ring[q.tail%len(q.ring)] = obj
	q.tail++
}

// popHead pops a node from the head of the queue. It panics if q is empty.
func (q *objQueue) popHead() types.Object {
	if q.empty() {
		panic("dequeue empty")
	}
	obj := q.ring[q.head%len(q.ring)]
	q.head++
	return obj
}
+
// internalError represents an error generated inside this package.
type internalError string

// Error implements the error interface, prefixing the message with the
// package name.
func (e internalError) Error() string {
	return "gcimporter: " + string(e)
}

// internalErrorf formats an internalError.
//
// TODO(adonovan): make this call panic, so that it's symmetric with errorf.
// Otherwise it's easy to forget to do anything with the error.
//
// TODO(adonovan): also, consider switching the names "errorf" and
// "internalErrorf" as the former is used for bugs, whose cause is
// internal inconsistency, whereas the latter is used for ordinary
// situations like bad input, whose cause is external.
func internalErrorf(format string, args ...any) error {
	msg := fmt.Sprintf(format, args...)
	return internalError(msg)
}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
new file mode 100644
index 0000000000..82e6c9d2dc
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -0,0 +1,1120 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See iexport.go for the export data format.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "math/big"
+ "slices"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/internal/aliases"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
// intReader reads varint-encoded integers from export data; any decode
// failure aborts the import via errorf (which panics and is recovered
// by iimportCommon).
type intReader struct {
	*bytes.Reader
	path string
}

// int64 reads a signed varint.
func (r *intReader) int64() int64 {
	i, err := binary.ReadVarint(r.Reader)
	if err != nil {
		errorf("import %q: read varint error: %v", r.path, err)
	}
	return i
}

// uint64 reads an unsigned varint.
func (r *intReader) uint64() uint64 {
	i, err := binary.ReadUvarint(r.Reader)
	if err != nil {
		errorf("import %q: read varint error: %v", r.path, err)
	}
	return i
}
+
// Keep this in sync with constants in iexport.go.
const (
	iexportVersionGo1_11 = 0 // original format
	iexportVersionPosCol = 1 // adds column information to positions
	iexportVersionGo1_18 = 2 // adds support for generics
	iexportVersionGenerics = 2
	iexportVersion = iexportVersionGenerics

	iexportVersionCurrent = 2
)
+
// ident is a (package, name) pair, used as the key of the typeparam index.
type ident struct {
	pkg *types.Package
	name string
}

// predeclReserved is the number of type offsets reserved for types
// predeclared in the universe scope.
const predeclReserved = 32

// itag distinguishes the kinds of type encodings in the data section.
type itag uint64
+
// The numeric values of these tags are part of the export data format;
// do not reorder.
const (
	// Types
	definedType itag = iota
	pointerType
	sliceType
	arrayType
	chanType
	mapType
	signatureType
	structType
	interfaceType
	typeParamType
	instanceType
	unionType
	aliasType
)
+
// Object tags. These byte values are part of the export data format;
// do not change them.
const (
	varTag = 'V'
	funcTag = 'F'
	genericFuncTag = 'G'
	constTag = 'C'
	aliasTag = 'A'
	genericAliasTag = 'B'
	typeParamTag = 'P'
	typeTag = 'T'
	genericTypeTag = 'U'
)
+
// IImportData imports a package from the serialized package data
// and returns 0 and a reference to the package.
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
//
// The int result is a legacy byte count and is always 0 here.
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
	pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil)
	if err != nil {
		return 0, nil, err
	}
	// Non-bundle mode decodes exactly one package.
	return 0, pkgs[0], nil
}
+
// IImportBundle imports a set of packages from the serialized package bundle.
// It returns an error if the bundle version or export data format is not
// recognized.
func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
	return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil)
}
+
+// A GetPackagesFunc function obtains the non-nil symbols for a set of
+// packages, creating and recursively importing them as needed. An
+// implementation should store each package symbol is in the Pkg
+// field of the items array.
+//
+// Any error causes importing to fail. This can be used to quickly read
+// the import manifest of an export data file without fully decoding it.
+type GetPackagesFunc = func(items []GetPackagesItem) error
+
+// A GetPackagesItem is a request from the importer for the package
+// symbol of the specified name and path.
+type GetPackagesItem struct {
+ Name, Path string
+ Pkg *types.Package // to be filled in by GetPackagesFunc call
+
+ // private importer state
+ pathOffset uint64
+ nameIndex map[string]uint64
+}
+
+// GetPackagesFromMap returns a GetPackagesFunc that retrieves
+// packages from the given map of package path to package.
+//
+// The returned function may mutate m: each requested package that is not
+// found is created with types.NewPackage and inserted into m.
+func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc {
+ return func(items []GetPackagesItem) error {
+ for i, item := range items {
+ pkg, ok := m[item.Path]
+ if !ok {
+ pkg = types.NewPackage(item.Path, item.Name)
+ m[item.Path] = pkg
+ }
+ items[i].Pkg = pkg
+ }
+ return nil
+ }
+}
+
+func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) {
+ const currentVersion = iexportVersionCurrent
+ version := int64(-1)
+ if !debug {
+ defer func() {
+ if e := recover(); e != nil {
+ if bundle {
+ err = fmt.Errorf("%v", e)
+ } else if version > currentVersion {
+ err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
+ } else {
+ err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e)
+ }
+ }
+ }()
+ }
+
+ r := &intReader{bytes.NewReader(data), path}
+
+ if bundle {
+ if v := r.uint64(); v != bundleVersion {
+ errorf("unknown bundle format version %d", v)
+ }
+ }
+
+ version = int64(r.uint64())
+ switch version {
+ case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11:
+ default:
+ if version > iexportVersionGo1_18 {
+ errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
+ } else {
+ errorf("unknown iexport format version %d", version)
+ }
+ }
+
+ sLen := int64(r.uint64())
+ var fLen int64
+ var fileOffset []uint64
+ if shallow {
+ // Shallow mode uses a different position encoding.
+ fLen = int64(r.uint64())
+ fileOffset = make([]uint64, r.uint64())
+ for i := range fileOffset {
+ fileOffset[i] = r.uint64()
+ }
+ }
+ dLen := int64(r.uint64())
+
+ whence, _ := r.Seek(0, io.SeekCurrent)
+ stringData := data[whence : whence+sLen]
+ fileData := data[whence+sLen : whence+sLen+fLen]
+ declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen]
+ r.Seek(sLen+fLen+dLen, io.SeekCurrent)
+
+ p := iimporter{
+ version: int(version),
+ ipath: path,
+ aliases: aliases.Enabled(),
+ shallow: shallow,
+ reportf: reportf,
+
+ stringData: stringData,
+ stringCache: make(map[uint64]string),
+ fileOffset: fileOffset,
+ fileData: fileData,
+ fileCache: make([]*token.File, len(fileOffset)),
+ pkgCache: make(map[uint64]*types.Package),
+
+ declData: declData,
+ pkgIndex: make(map[*types.Package]map[string]uint64),
+ typCache: make(map[uint64]types.Type),
+ // Separate map for typeparams, keyed by their package and unique
+ // name.
+ tparamIndex: make(map[ident]types.Type),
+
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*fileInfo),
+ },
+ }
+ defer p.fake.setLines() // set lines for files in fset
+
+ for i, pt := range predeclared() {
+ p.typCache[uint64(i)] = pt
+ }
+
+ // Gather the relevant packages from the manifest.
+ items := make([]GetPackagesItem, r.uint64())
+ uniquePkgPaths := make(map[string]bool)
+ for i := range items {
+ pkgPathOff := r.uint64()
+ pkgPath := p.stringAt(pkgPathOff)
+ pkgName := p.stringAt(r.uint64())
+ _ = r.uint64() // package height; unused by go/types
+
+ if pkgPath == "" {
+ pkgPath = path
+ }
+ items[i].Name = pkgName
+ items[i].Path = pkgPath
+ items[i].pathOffset = pkgPathOff
+
+ // Read index for package.
+ nameIndex := make(map[string]uint64)
+ nSyms := r.uint64()
+ // In shallow mode, only the current package (i=0) has an index.
+ assert(!(shallow && i > 0 && nSyms != 0))
+ for ; nSyms > 0; nSyms-- {
+ name := p.stringAt(r.uint64())
+ nameIndex[name] = r.uint64()
+ }
+
+ items[i].nameIndex = nameIndex
+
+ uniquePkgPaths[pkgPath] = true
+ }
+ // Debugging #63822; hypothesis: there are duplicate PkgPaths.
+ if len(uniquePkgPaths) != len(items) {
+ reportf("found duplicate PkgPaths while reading export data manifest: %v", items)
+ }
+
+ // Request packages all at once from the client,
+ // enabling a parallel implementation.
+ if err := getPackages(items); err != nil {
+ return nil, err // don't wrap this error
+ }
+
+ // Check the results and complete the index.
+ pkgList := make([]*types.Package, len(items))
+ for i, item := range items {
+ pkg := item.Pkg
+ if pkg == nil {
+ errorf("internal error: getPackages returned nil package for %q", item.Path)
+ } else if pkg.Path() != item.Path {
+ errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path)
+ } else if pkg.Name() != item.Name {
+ errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name)
+ }
+ p.pkgCache[item.pathOffset] = pkg
+ p.pkgIndex[pkg] = item.nameIndex
+ pkgList[i] = pkg
+ }
+
+ if bundle {
+ pkgs = make([]*types.Package, r.uint64())
+ for i := range pkgs {
+ pkg := p.pkgAt(r.uint64())
+ imps := make([]*types.Package, r.uint64())
+ for j := range imps {
+ imps[j] = p.pkgAt(r.uint64())
+ }
+ pkg.SetImports(imps)
+ pkgs[i] = pkg
+ }
+ } else {
+ if len(pkgList) == 0 {
+ errorf("no packages found for %s", path)
+ panic("unreachable")
+ }
+ pkgs = pkgList[:1]
+
+ // record all referenced packages as imports
+ list := slices.Clone(pkgList[1:])
+ sort.Sort(byPath(list))
+ pkgs[0].SetImports(list)
+ }
+
+ for _, pkg := range pkgs {
+ if pkg.Complete() {
+ continue
+ }
+
+ names := make([]string, 0, len(p.pkgIndex[pkg]))
+ for name := range p.pkgIndex[pkg] {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ p.doDecl(pkg, name)
+ }
+
+ // package was imported completely and without errors
+ pkg.MarkComplete()
+ }
+
+ // SetConstraint can't be called if the constraint type is not yet complete.
+ // When type params are created in the typeParamTag case of (*importReader).obj(),
+ // the associated constraint type may not be complete due to recursion.
+ // Therefore, we defer calling SetConstraint there, and call it here instead
+ // after all types are complete.
+ for _, d := range p.later {
+ d.t.SetConstraint(d.constraint)
+ }
+
+ for _, typ := range p.interfaceList {
+ typ.Complete()
+ }
+
+ // Workaround for golang/go#61561. See the doc for instanceList for details.
+ for _, typ := range p.instanceList {
+ if iface, _ := typ.Underlying().(*types.Interface); iface != nil {
+ iface.Complete()
+ }
+ }
+
+ return pkgs, nil
+}
+
+type setConstraintArgs struct {
+ t *types.TypeParam
+ constraint types.Type
+}
+
+type iimporter struct {
+ version int
+ ipath string
+
+ aliases bool
+ shallow bool
+ reportf ReportFunc // if non-nil, used to report bugs
+
+ stringData []byte
+ stringCache map[uint64]string
+ fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i
+ fileData []byte
+ fileCache []*token.File // memoized decoding of file encoded as i
+ pkgCache map[uint64]*types.Package
+
+ declData []byte
+ pkgIndex map[*types.Package]map[string]uint64
+ typCache map[uint64]types.Type
+ tparamIndex map[ident]types.Type
+
+ fake fakeFileSet
+ interfaceList []*types.Interface
+
+ // Workaround for the go/types bug golang/go#61561: instances produced during
+ // instantiation may contain incomplete interfaces. Here we only complete the
+ // underlying type of the instance, which is the most common case but doesn't
+ // handle parameterized interface literals defined deeper in the type.
+ instanceList []types.Type // instances for later completion (see golang/go#61561)
+
+ // Arguments for calls to SetConstraint that are deferred due to recursive types
+ later []setConstraintArgs
+
+ indent int // for tracing support
+}
+
+func (p *iimporter) trace(format string, args ...any) {
+ if !trace {
+ // Call sites should also be guarded, but having this check here allows
+ // easily enabling/disabling debug trace statements.
+ return
+ }
+ fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
+}
+
+func (p *iimporter) doDecl(pkg *types.Package, name string) {
+ if debug {
+ p.trace("import decl %s", name)
+ p.indent++
+ defer func() {
+ p.indent--
+ p.trace("=> %s", name)
+ }()
+ }
+ // See if we've already imported this declaration.
+ if obj := pkg.Scope().Lookup(name); obj != nil {
+ return
+ }
+
+ off, ok := p.pkgIndex[pkg][name]
+ if !ok {
+ // In deep mode, the index should be complete. In shallow
+ // mode, we should have already recursively loaded necessary
+ // dependencies so the above Lookup succeeds.
+ errorf("%v.%v not in index", pkg, name)
+ }
+
+ r := &importReader{p: p, currPkg: pkg}
+ r.declReader.Reset(p.declData[off:])
+
+ r.obj(name)
+}
+
+func (p *iimporter) stringAt(off uint64) string {
+ if s, ok := p.stringCache[off]; ok {
+ return s
+ }
+
+ slen, n := binary.Uvarint(p.stringData[off:])
+ if n <= 0 {
+ errorf("varint failed")
+ }
+ spos := off + uint64(n)
+ s := string(p.stringData[spos : spos+slen])
+ p.stringCache[off] = s
+ return s
+}
+
+func (p *iimporter) fileAt(index uint64) *token.File {
+ file := p.fileCache[index]
+ if file == nil {
+ off := p.fileOffset[index]
+ file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath})
+ p.fileCache[index] = file
+ }
+ return file
+}
+
+func (p *iimporter) decodeFile(rd intReader) *token.File {
+ filename := p.stringAt(rd.uint64())
+ size := int(rd.uint64())
+ file := p.fake.fset.AddFile(filename, -1, size)
+
+ // SetLines requires a nondecreasing sequence.
+ // Because it is common for clients to derive the interval
+ // [start, start+len(name)] from a start position, and we
+ // want to ensure that the end offset is on the same line,
+ // we fill in the gaps of the sparse encoding with values
+ // that strictly increase by the largest possible amount.
+ // This allows us to avoid having to record the actual end
+ // offset of each needed line.
+
+ lines := make([]int, int(rd.uint64()))
+ var index, offset int
+ for i, n := 0, int(rd.uint64()); i < n; i++ {
+ index += int(rd.uint64())
+ offset += int(rd.uint64())
+ lines[index] = offset
+
+ // Ensure monotonicity between points.
+ for j := index - 1; j > 0 && lines[j] == 0; j-- {
+ lines[j] = lines[j+1] - 1
+ }
+ }
+
+ // Ensure monotonicity after last point.
+ for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- {
+ size--
+ lines[j] = size
+ }
+
+ if !file.SetLines(lines) {
+ errorf("SetLines failed: %d", lines) // can't happen
+ }
+ return file
+}
+
+func (p *iimporter) pkgAt(off uint64) *types.Package {
+ if pkg, ok := p.pkgCache[off]; ok {
+ return pkg
+ }
+ path := p.stringAt(off)
+ errorf("missing package %q in %q", path, p.ipath)
+ return nil
+}
+
+func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
+ if t, ok := p.typCache[off]; ok && canReuse(base, t) {
+ return t
+ }
+
+ if off < predeclReserved {
+ errorf("predeclared type missing from cache: %v", off)
+ }
+
+ r := &importReader{p: p}
+ r.declReader.Reset(p.declData[off-predeclReserved:])
+ t := r.doType(base)
+
+ if canReuse(base, t) {
+ p.typCache[off] = t
+ }
+ return t
+}
+
+// canReuse reports whether the type rhs on the RHS of the declaration for def
+// may be re-used.
+//
+// Specifically, if def is non-nil and rhs is an interface type with methods, it
+// may not be re-used because we have a convention of setting the receiver type
+// for interface methods to def.
+func canReuse(def *types.Named, rhs types.Type) bool {
+ if def == nil {
+ return true
+ }
+ iface, _ := types.Unalias(rhs).(*types.Interface)
+ if iface == nil {
+ return true
+ }
+ // Don't use iface.Empty() here as iface may not be complete.
+ return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0
+}
+
+type importReader struct {
+ p *iimporter
+ declReader bytes.Reader
+ currPkg *types.Package
+ prevFile string
+ prevLine int64
+ prevColumn int64
+}
+
+// markBlack is redefined in iimport_go123.go, to work around golang/go#69912.
+//
+// If TypeNames are not marked black (in the sense of go/types cycle
+// detection), they may be mutated when dot-imported. Fix this by punching a
+// hole through the type, when compiling with Go 1.23. (The bug has been fixed
+// for 1.24, but the fix was not worth back-porting).
+var markBlack = func(name *types.TypeName) {}
+
+func (r *importReader) obj(name string) {
+ tag := r.byte()
+ pos := r.pos()
+
+ switch tag {
+ case aliasTag, genericAliasTag:
+ var tparams []*types.TypeParam
+ if tag == genericAliasTag {
+ tparams = r.tparamList()
+ }
+ typ := r.typ()
+ obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams)
+ markBlack(obj) // workaround for golang/go#69912
+ r.declare(obj)
+
+ case constTag:
+ typ, val := r.value()
+
+ r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
+
+ case funcTag, genericFuncTag:
+ var tparams []*types.TypeParam
+ if tag == genericFuncTag {
+ tparams = r.tparamList()
+ }
+ sig := r.signature(nil, nil, tparams)
+ r.declare(types.NewFunc(pos, r.currPkg, name, sig))
+
+ case typeTag, genericTypeTag:
+ // Types can be recursive. We need to setup a stub
+ // declaration before recursing.
+ obj := types.NewTypeName(pos, r.currPkg, name, nil)
+ named := types.NewNamed(obj, nil, nil)
+
+ markBlack(obj) // workaround for golang/go#69912
+
+ // Declare obj before calling r.tparamList, so the new type name is recognized
+ // if used in the constraint of one of its own typeparams (see #48280).
+ r.declare(obj)
+ if tag == genericTypeTag {
+ tparams := r.tparamList()
+ named.SetTypeParams(tparams)
+ }
+
+ underlying := r.p.typAt(r.uint64(), named).Underlying()
+ named.SetUnderlying(underlying)
+
+ if !isInterface(underlying) {
+ for n := r.uint64(); n > 0; n-- {
+ mpos := r.pos()
+ mname := r.ident()
+ recv := r.param()
+
+ // If the receiver has any targs, set those as the
+ // rparams of the method (since those are the
+ // typeparams being used in the method sig/body).
+ _, recvNamed := typesinternal.ReceiverNamed(recv)
+ targs := recvNamed.TypeArgs()
+ var rparams []*types.TypeParam
+ if targs.Len() > 0 {
+ rparams = make([]*types.TypeParam, targs.Len())
+ for i := range rparams {
+ rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam)
+ }
+ }
+ msig := r.signature(recv, rparams, nil)
+
+ named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
+ }
+ }
+
+ case typeParamTag:
+ // We need to "declare" a typeparam in order to have a name that
+ // can be referenced recursively (if needed) in the type param's
+ // bound.
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ name0 := tparamName(name)
+ tn := types.NewTypeName(pos, r.currPkg, name0, nil)
+ t := types.NewTypeParam(tn, nil)
+
+ // To handle recursive references to the typeparam within its
+ // bound, save the partial type in tparamIndex before reading the bounds.
+ id := ident{r.currPkg, name}
+ r.p.tparamIndex[id] = t
+ var implicit bool
+ if r.p.version >= iexportVersionGo1_18 {
+ implicit = r.bool()
+ }
+ constraint := r.typ()
+ if implicit {
+ iface, _ := types.Unalias(constraint).(*types.Interface)
+ if iface == nil {
+ errorf("non-interface constraint marked implicit")
+ }
+ iface.MarkImplicit()
+ }
+ // The constraint type may not be complete, if we
+ // are in the middle of a type recursion involving type
+ // constraints. So, we defer SetConstraint until we have
+ // completely set up all types in ImportData.
+ r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint})
+
+ case varTag:
+ typ := r.typ()
+
+ v := types.NewVar(pos, r.currPkg, name, typ)
+ typesinternal.SetVarKind(v, typesinternal.PackageVar)
+ r.declare(v)
+
+ default:
+ errorf("unexpected tag: %v", tag)
+ }
+}
+
+func (r *importReader) declare(obj types.Object) {
+ obj.Pkg().Scope().Insert(obj)
+}
+
+func (r *importReader) value() (typ types.Type, val constant.Value) {
+ typ = r.typ()
+ if r.p.version >= iexportVersionGo1_18 {
+ // TODO: add support for using the kind.
+ _ = constant.Kind(r.int64())
+ }
+
+ switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
+ case types.IsBoolean:
+ val = constant.MakeBool(r.bool())
+
+ case types.IsString:
+ val = constant.MakeString(r.string())
+
+ case types.IsInteger:
+ var x big.Int
+ r.mpint(&x, b)
+ val = constant.Make(&x)
+
+ case types.IsFloat:
+ val = r.mpfloat(b)
+
+ case types.IsComplex:
+ re := r.mpfloat(b)
+ im := r.mpfloat(b)
+ val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+
+ default:
+ if b.Kind() == types.Invalid {
+ val = constant.MakeUnknown()
+ return
+ }
+ errorf("unexpected type %v", typ) // panics
+ panic("unreachable")
+ }
+
+ return
+}
+
+func intSize(b *types.Basic) (signed bool, maxBytes uint) {
+ if (b.Info() & types.IsUntyped) != 0 {
+ return true, 64
+ }
+
+ switch b.Kind() {
+ case types.Float32, types.Complex64:
+ return true, 3
+ case types.Float64, types.Complex128:
+ return true, 7
+ }
+
+ signed = (b.Info() & types.IsUnsigned) == 0
+ switch b.Kind() {
+ case types.Int8, types.Uint8:
+ maxBytes = 1
+ case types.Int16, types.Uint16:
+ maxBytes = 2
+ case types.Int32, types.Uint32:
+ maxBytes = 4
+ default:
+ maxBytes = 8
+ }
+
+ return
+}
+
+func (r *importReader) mpint(x *big.Int, typ *types.Basic) {
+ signed, maxBytes := intSize(typ)
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ n, _ := r.declReader.ReadByte()
+ if uint(n) < maxSmall {
+ v := int64(n)
+ if signed {
+ v >>= 1
+ if n&1 != 0 {
+ v = ^v
+ }
+ }
+ x.SetInt64(v)
+ return
+ }
+
+ v := -n
+ if signed {
+ v = -(n &^ 1) >> 1
+ }
+ if v < 1 || uint(v) > maxBytes {
+ errorf("weird decoding: %v, %v => %v", n, signed, v)
+ }
+ b := make([]byte, v)
+ io.ReadFull(&r.declReader, b)
+ x.SetBytes(b)
+ if signed && n&1 != 0 {
+ x.Neg(x)
+ }
+}
+
+func (r *importReader) mpfloat(typ *types.Basic) constant.Value {
+ var mant big.Int
+ r.mpint(&mant, typ)
+ var f big.Float
+ f.SetInt(&mant)
+ if f.Sign() != 0 {
+ f.SetMantExp(&f, int(r.int64()))
+ }
+ return constant.Make(&f)
+}
+
+func (r *importReader) ident() string {
+ return r.string()
+}
+
+func (r *importReader) qualifiedIdent() (*types.Package, string) {
+ name := r.string()
+ pkg := r.pkg()
+ return pkg, name
+}
+
+func (r *importReader) pos() token.Pos {
+ if r.p.shallow {
+ // precise offsets are encoded only in shallow mode
+ return r.posv2()
+ }
+ if r.p.version >= iexportVersionPosCol {
+ r.posv1()
+ } else {
+ r.posv0()
+ }
+
+ if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
+ return token.NoPos
+ }
+ return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
+}
+
+func (r *importReader) posv0() {
+ delta := r.int64()
+ if delta != deltaNewFile {
+ r.prevLine += delta
+ } else if l := r.int64(); l == -1 {
+ r.prevLine += deltaNewFile
+ } else {
+ r.prevFile = r.string()
+ r.prevLine = l
+ }
+}
+
+func (r *importReader) posv1() {
+ delta := r.int64()
+ r.prevColumn += delta >> 1
+ if delta&1 != 0 {
+ delta = r.int64()
+ r.prevLine += delta >> 1
+ if delta&1 != 0 {
+ r.prevFile = r.string()
+ }
+ }
+}
+
+func (r *importReader) posv2() token.Pos {
+ file := r.uint64()
+ if file == 0 {
+ return token.NoPos
+ }
+ tf := r.p.fileAt(file - 1)
+ return tf.Pos(int(r.uint64()))
+}
+
+func (r *importReader) typ() types.Type {
+ return r.p.typAt(r.uint64(), nil)
+}
+
+func isInterface(t types.Type) bool {
+ _, ok := types.Unalias(t).(*types.Interface)
+ return ok
+}
+
+func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+
+func (r *importReader) doType(base *types.Named) (res types.Type) {
+ k := r.kind()
+ if debug {
+ r.p.trace("importing type %d (base: %v)", k, base)
+ r.p.indent++
+ defer func() {
+ r.p.indent--
+ r.p.trace("=> %s", res)
+ }()
+ }
+ switch k {
+ default:
+ errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
+ return nil
+
+ case aliasType, definedType:
+ pkg, name := r.qualifiedIdent()
+ r.p.doDecl(pkg, name)
+ return pkg.Scope().Lookup(name).(*types.TypeName).Type()
+ case pointerType:
+ return types.NewPointer(r.typ())
+ case sliceType:
+ return types.NewSlice(r.typ())
+ case arrayType:
+ n := r.uint64()
+ return types.NewArray(r.typ(), int64(n))
+ case chanType:
+ dir := chanDir(int(r.uint64()))
+ return types.NewChan(dir, r.typ())
+ case mapType:
+ return types.NewMap(r.typ(), r.typ())
+ case signatureType:
+ r.currPkg = r.pkg()
+ return r.signature(nil, nil, nil)
+
+ case structType:
+ r.currPkg = r.pkg()
+
+ fields := make([]*types.Var, r.uint64())
+ tags := make([]string, len(fields))
+ for i := range fields {
+ var field *types.Var
+ if r.p.shallow {
+ field, _ = r.objectPathObject().(*types.Var)
+ }
+
+ fpos := r.pos()
+ fname := r.ident()
+ ftyp := r.typ()
+ emb := r.bool()
+ tag := r.string()
+
+ // Either this is not a shallow import, the field is local, or the
+ // encoded objectPath failed to produce an object (a bug).
+ //
+ // Even in this last, buggy case, fall back on creating a new field. As
+ // discussed in iexport.go, this is not correct, but mostly works and is
+ // preferable to failing (for now at least).
+ if field == nil {
+ field = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
+ }
+
+ fields[i] = field
+ tags[i] = tag
+ }
+ return types.NewStruct(fields, tags)
+
+ case interfaceType:
+ r.currPkg = r.pkg()
+
+ embeddeds := make([]types.Type, r.uint64())
+ for i := range embeddeds {
+ _ = r.pos()
+ embeddeds[i] = r.typ()
+ }
+
+ methods := make([]*types.Func, r.uint64())
+ for i := range methods {
+ var method *types.Func
+ if r.p.shallow {
+ method, _ = r.objectPathObject().(*types.Func)
+ }
+
+ mpos := r.pos()
+ mname := r.ident()
+
+ // TODO(mdempsky): Matches bimport.go, but I
+ // don't agree with this.
+ var recv *types.Var
+ if base != nil {
+ recv = types.NewVar(token.NoPos, r.currPkg, "", base)
+ }
+ msig := r.signature(recv, nil, nil)
+
+ if method == nil {
+ method = types.NewFunc(mpos, r.currPkg, mname, msig)
+ }
+ methods[i] = method
+ }
+
+ typ := types.NewInterfaceType(methods, embeddeds)
+ r.p.interfaceList = append(r.p.interfaceList, typ)
+ return typ
+
+ case typeParamType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ pkg, name := r.qualifiedIdent()
+ id := ident{pkg, name}
+ if t, ok := r.p.tparamIndex[id]; ok {
+ // We're already in the process of importing this typeparam.
+ return t
+ }
+ // Otherwise, import the definition of the typeparam now.
+ r.p.doDecl(pkg, name)
+ return r.p.tparamIndex[id]
+
+ case instanceType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ // pos does not matter for instances: they are positioned on the original
+ // type.
+ _ = r.pos()
+ len := r.uint64()
+ targs := make([]types.Type, len)
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+ baseType := r.typ()
+ // The imported instantiated type doesn't include any methods, so
+ // we must always use the methods of the base (orig) type.
+ // TODO provide a non-nil *Environment
+ t, _ := types.Instantiate(nil, baseType, targs, false)
+
+ // Workaround for golang/go#61561. See the doc for instanceList for details.
+ r.p.instanceList = append(r.p.instanceList, t)
+ return t
+
+ case unionType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ terms := make([]*types.Term, r.uint64())
+ for i := range terms {
+ terms[i] = types.NewTerm(r.bool(), r.typ())
+ }
+ return types.NewUnion(terms)
+ }
+}
+
+func (r *importReader) kind() itag {
+ return itag(r.uint64())
+}
+
+// objectPathObject is the inverse of exportWriter.objectPath.
+//
+// In shallow mode, certain fields and methods may need to be looked up in an
+// imported package. See the doc for exportWriter.objectPath for a full
+// explanation.
+func (r *importReader) objectPathObject() types.Object {
+ objPath := objectpath.Path(r.string())
+ if objPath == "" {
+ return nil
+ }
+ pkg := r.pkg()
+ obj, err := objectpath.Object(pkg, objPath)
+ if err != nil {
+ if r.p.reportf != nil {
+ r.p.reportf("failed to find object for objectPath %q: %v", objPath, err)
+ }
+ }
+ return obj
+}
+
+func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature {
+ params := r.paramList()
+ results := r.paramList()
+ variadic := params.Len() > 0 && r.bool()
+ return types.NewSignatureType(recv, rparams, tparams, params, results, variadic)
+}
+
+func (r *importReader) tparamList() []*types.TypeParam {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ xs := make([]*types.TypeParam, n)
+ for i := range xs {
+ // Note: the standard library importer is tolerant of nil types here,
+ // though would panic in SetTypeParams.
+ xs[i] = types.Unalias(r.typ()).(*types.TypeParam)
+ }
+ return xs
+}
+
+func (r *importReader) paramList() *types.Tuple {
+ xs := make([]*types.Var, r.uint64())
+ for i := range xs {
+ xs[i] = r.param()
+ }
+ return types.NewTuple(xs...)
+}
+
+func (r *importReader) param() *types.Var {
+ pos := r.pos()
+ name := r.ident()
+ typ := r.typ()
+ return types.NewParam(pos, r.currPkg, name, typ)
+}
+
+func (r *importReader) bool() bool {
+ return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+ n, err := binary.ReadVarint(&r.declReader)
+ if err != nil {
+ errorf("readVarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) uint64() uint64 {
+ n, err := binary.ReadUvarint(&r.declReader)
+ if err != nil {
+ errorf("readUvarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) byte() byte {
+ x, err := r.declReader.ReadByte()
+ if err != nil {
+ errorf("declReader.ReadByte: %v", err)
+ }
+ return x
+}
+
+type byPath []*types.Package
+
+func (a byPath) Len() int { return len(a) }
+func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go
new file mode 100644
index 0000000000..907c8557a5
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go
@@ -0,0 +1,91 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter
+
+import (
+ "go/types"
+ "sync"
+)
+
+// predecl is a cache for the predeclared types in types.Universe.
+//
+// Cache a distinct result based on the runtime value of any.
+// The pointer value of the any type varies based on GODEBUG settings.
+var predeclMu sync.Mutex
+var predecl map[types.Type][]types.Type
+
+func predeclared() []types.Type {
+ anyt := types.Universe.Lookup("any").Type()
+
+ predeclMu.Lock()
+ defer predeclMu.Unlock()
+
+ if pre, ok := predecl[anyt]; ok {
+ return pre
+ }
+
+ if predecl == nil {
+ predecl = make(map[types.Type][]types.Type)
+ }
+
+ decls := []types.Type{ // basic types
+ types.Typ[types.Bool],
+ types.Typ[types.Int],
+ types.Typ[types.Int8],
+ types.Typ[types.Int16],
+ types.Typ[types.Int32],
+ types.Typ[types.Int64],
+ types.Typ[types.Uint],
+ types.Typ[types.Uint8],
+ types.Typ[types.Uint16],
+ types.Typ[types.Uint32],
+ types.Typ[types.Uint64],
+ types.Typ[types.Uintptr],
+ types.Typ[types.Float32],
+ types.Typ[types.Float64],
+ types.Typ[types.Complex64],
+ types.Typ[types.Complex128],
+ types.Typ[types.String],
+
+ // basic type aliases
+ types.Universe.Lookup("byte").Type(),
+ types.Universe.Lookup("rune").Type(),
+
+ // error
+ types.Universe.Lookup("error").Type(),
+
+ // untyped types
+ types.Typ[types.UntypedBool],
+ types.Typ[types.UntypedInt],
+ types.Typ[types.UntypedRune],
+ types.Typ[types.UntypedFloat],
+ types.Typ[types.UntypedComplex],
+ types.Typ[types.UntypedString],
+ types.Typ[types.UntypedNil],
+
+ // package unsafe
+ types.Typ[types.UnsafePointer],
+
+ // invalid type
+ types.Typ[types.Invalid], // only appears in packages with errors
+
+ // used internally by gc; never used by this package or in .a files
+ anyType{},
+
+ // comparable
+ types.Universe.Lookup("comparable").Type(),
+
+ // any
+ anyt,
+ }
+
+ predecl[anyt] = decls
+ return decls
+}
+
+type anyType struct{}
+
+func (t anyType) Underlying() types.Type { return t }
+func (t anyType) String() string { return "any" }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support.go b/vendor/golang.org/x/tools/internal/gcimporter/support.go
new file mode 100644
index 0000000000..4af810dc41
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/support.go
@@ -0,0 +1,30 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter
+
+import (
+ "bufio"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader.
+func readArchiveHeader(b *bufio.Reader, name string) int {
+ // architecture-independent object file output
+ const HeaderSize = 60
+
+ var buf [HeaderSize]byte
+ if _, err := io.ReadFull(b, buf[:]); err != nil {
+ return -1
+ }
+ aname := strings.Trim(string(buf[0:16]), " ")
+ if !strings.HasPrefix(aname, name) {
+ return -1
+ }
+ asize := strings.Trim(string(buf[48:58]), " ")
+ i, _ := strconv.Atoi(asize)
+ return i
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
new file mode 100644
index 0000000000..37b4a39e9e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -0,0 +1,761 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Derived from go/internal/gcimporter/ureader.go
+
+package gcimporter
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "sort"
+
+ "golang.org/x/tools/internal/aliases"
+ "golang.org/x/tools/internal/pkgbits"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+// A pkgReader holds the shared state for reading a unified IR package
+// description.
+type pkgReader struct {
+ pkgbits.PkgDecoder
+
+ fake fakeFileSet
+
+ ctxt *types.Context
+ imports map[string]*types.Package // previously imported packages, indexed by path
+ aliases bool // create types.Alias nodes
+
+ // lazily initialized arrays corresponding to the unified IR
+ // PosBase, Pkg, and Type sections, respectively.
+ posBases []string // position bases (i.e., file names)
+ pkgs []*types.Package
+ typs []types.Type
+
+ // laterFns holds functions that need to be invoked at the end of
+ // import reading.
+ laterFns []func()
+ // laterFors is used in case of 'type A B' to ensure that B is processed before A.
+ laterFors map[types.Type]int
+
+ // ifaces holds a list of constructed Interfaces, which need to have
+ // Complete called after importing is done.
+ ifaces []*types.Interface
+}
+
+// later adds a function to be invoked at the end of import reading.
+func (pr *pkgReader) later(fn func()) {
+ pr.laterFns = append(pr.laterFns, fn)
+}
+
+// See cmd/compile/internal/noder.derivedInfo.
+type derivedInfo struct {
+ idx pkgbits.Index
+}
+
+// See cmd/compile/internal/noder.typeInfo.
+type typeInfo struct {
+ idx pkgbits.Index
+ derived bool
+}
+
+func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+ if !debug {
+ defer func() {
+ if x := recover(); x != nil {
+ err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x)
+ }
+ }()
+ }
+
+ s := string(data)
+ input := pkgbits.NewPkgDecoder(path, s)
+ pkg = readUnifiedPackage(fset, nil, imports, input)
+ return
+}
+
+// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing.
+func (pr *pkgReader) laterFor(t types.Type, fn func()) {
+ if pr.laterFors == nil {
+ pr.laterFors = make(map[types.Type]int)
+ }
+ pr.laterFors[t] = len(pr.laterFns)
+ pr.laterFns = append(pr.laterFns, fn)
+}
+
+// readUnifiedPackage reads a package description from the given
+// unified IR export data decoder.
+func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package {
+ pr := pkgReader{
+ PkgDecoder: input,
+
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*fileInfo),
+ },
+
+ ctxt: ctxt,
+ imports: imports,
+ aliases: aliases.Enabled(),
+
+ posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)),
+ pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)),
+ typs: make([]types.Type, input.NumElems(pkgbits.RelocType)),
+ }
+ defer pr.fake.setLines()
+
+ r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
+ pkg := r.pkg()
+ if r.Version().Has(pkgbits.HasInit) {
+ r.Bool()
+ }
+
+ for i, n := 0, r.Len(); i < n; i++ {
+ // As if r.obj(), but avoiding the Scope.Lookup call,
+ // to avoid eager loading of imports.
+ r.Sync(pkgbits.SyncObject)
+ if r.Version().Has(pkgbits.DerivedFuncInstance) {
+ assert(!r.Bool())
+ }
+ r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+ assert(r.Len() == 0)
+ }
+
+ r.Sync(pkgbits.SyncEOF)
+
+ for _, fn := range pr.laterFns {
+ fn()
+ }
+
+ for _, iface := range pr.ifaces {
+ iface.Complete()
+ }
+
+ // Imports() of pkg are all of the transitive packages that were loaded.
+ var imps []*types.Package
+ for _, imp := range pr.pkgs {
+ if imp != nil && imp != pkg {
+ imps = append(imps, imp)
+ }
+ }
+ sort.Sort(byPath(imps))
+ pkg.SetImports(imps)
+
+ pkg.MarkComplete()
+ return pkg
+}
+
+// A reader holds the state for reading a single unified IR element
+// within a package.
+type reader struct {
+ pkgbits.Decoder
+
+ p *pkgReader
+
+ dict *readerDict
+}
+
+// A readerDict holds the state for type parameters that parameterize
+// the current unified IR element.
+type readerDict struct {
+ // bounds is a slice of typeInfos corresponding to the underlying
+ // bounds of the element's type parameters.
+ bounds []typeInfo
+
+ // tparams is a slice of the constructed TypeParams for the element.
+ tparams []*types.TypeParam
+
+ // derived is a slice of types derived from tparams, which may be
+ // instantiated while reading the current element.
+ derived []derivedInfo
+ derivedTypes []types.Type // lazily instantiated from derived
+}
+
+func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+ return &reader{
+ Decoder: pr.NewDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+ return &reader{
+ Decoder: pr.TempDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+func (pr *pkgReader) retireReader(r *reader) {
+ pr.RetireDecoder(&r.Decoder)
+}
+
+// @@@ Positions
+
+func (r *reader) pos() token.Pos {
+ r.Sync(pkgbits.SyncPos)
+ if !r.Bool() {
+ return token.NoPos
+ }
+
+ // TODO(mdempsky): Delta encoding.
+ posBase := r.posBase()
+ line := r.Uint()
+ col := r.Uint()
+ return r.p.fake.pos(posBase, int(line), int(col))
+}
+
+func (r *reader) posBase() string {
+ return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase))
+}
+
+func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string {
+ if b := pr.posBases[idx]; b != "" {
+ return b
+ }
+
+ var filename string
+ {
+ r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
+
+ // Within types2, position bases have a lot more details (e.g.,
+ // keeping track of where //line directives appeared exactly).
+ //
+ // For go/types, we just track the file name.
+
+ filename = r.String()
+
+ if r.Bool() { // file base
+ // Was: "b = token.NewTrimmedFileBase(filename, true)"
+ } else { // line base
+ pos := r.pos()
+ line := r.Uint()
+ col := r.Uint()
+
+ // Was: "b = token.NewLineBase(pos, filename, true, line, col)"
+ _, _, _ = pos, line, col
+ }
+ pr.retireReader(r)
+ }
+ b := filename
+ pr.posBases[idx] = b
+ return b
+}
+
+// @@@ Packages
+
+func (r *reader) pkg() *types.Package {
+ r.Sync(pkgbits.SyncPkg)
+ return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg))
+}
+
+func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package {
+ // TODO(mdempsky): Consider using some non-nil pointer to indicate
+ // the universe scope, so we don't need to keep re-reading it.
+ if pkg := pr.pkgs[idx]; pkg != nil {
+ return pkg
+ }
+
+ pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg()
+ pr.pkgs[idx] = pkg
+ return pkg
+}
+
+func (r *reader) doPkg() *types.Package {
+ path := r.String()
+ switch path {
+ // cmd/compile emits path="main" for main packages because
+ // that's the linker symbol prefix it used; but we need
+ // the package's path as it would be reported by go list,
+ // hence "main" below.
+ // See test at go/packages.TestMainPackagePathInModeTypes.
+ case "", "main":
+ path = r.p.PkgPath()
+ case "builtin":
+ return nil // universe
+ case "unsafe":
+ return types.Unsafe
+ }
+
+ if pkg := r.p.imports[path]; pkg != nil {
+ return pkg
+ }
+
+ name := r.String()
+
+ pkg := types.NewPackage(path, name)
+ r.p.imports[path] = pkg
+
+ return pkg
+}
+
+// @@@ Types
+
+func (r *reader) typ() types.Type {
+ return r.p.typIdx(r.typInfo(), r.dict)
+}
+
+func (r *reader) typInfo() typeInfo {
+ r.Sync(pkgbits.SyncType)
+ if r.Bool() {
+ return typeInfo{idx: pkgbits.Index(r.Len()), derived: true}
+ }
+ return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false}
+}
+
+func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type {
+ idx := info.idx
+ var where *types.Type
+ if info.derived {
+ where = &dict.derivedTypes[idx]
+ idx = dict.derived[idx].idx
+ } else {
+ where = &pr.typs[idx]
+ }
+
+ if typ := *where; typ != nil {
+ return typ
+ }
+
+ var typ types.Type
+ {
+ r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
+ r.dict = dict
+
+ typ = r.doTyp()
+ assert(typ != nil)
+ pr.retireReader(r)
+ }
+ // See comment in pkgReader.typIdx explaining how this happens.
+ if prev := *where; prev != nil {
+ return prev
+ }
+
+ *where = typ
+ return typ
+}
+
+func (r *reader) doTyp() (res types.Type) {
+ switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag {
+ default:
+ errorf("unhandled type tag: %v", tag)
+ panic("unreachable")
+
+ case pkgbits.TypeBasic:
+ return types.Typ[r.Len()]
+
+ case pkgbits.TypeNamed:
+ obj, targs := r.obj()
+ name := obj.(*types.TypeName)
+ if len(targs) != 0 {
+ t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false)
+ return t
+ }
+ return name.Type()
+
+ case pkgbits.TypeTypeParam:
+ return r.dict.tparams[r.Len()]
+
+ case pkgbits.TypeArray:
+ len := int64(r.Uint64())
+ return types.NewArray(r.typ(), len)
+ case pkgbits.TypeChan:
+ dir := types.ChanDir(r.Len())
+ return types.NewChan(dir, r.typ())
+ case pkgbits.TypeMap:
+ return types.NewMap(r.typ(), r.typ())
+ case pkgbits.TypePointer:
+ return types.NewPointer(r.typ())
+ case pkgbits.TypeSignature:
+ return r.signature(nil, nil, nil)
+ case pkgbits.TypeSlice:
+ return types.NewSlice(r.typ())
+ case pkgbits.TypeStruct:
+ return r.structType()
+ case pkgbits.TypeInterface:
+ return r.interfaceType()
+ case pkgbits.TypeUnion:
+ return r.unionType()
+ }
+}
+
+func (r *reader) structType() *types.Struct {
+ fields := make([]*types.Var, r.Len())
+ var tags []string
+ for i := range fields {
+ pos := r.pos()
+ pkg, name := r.selector()
+ ftyp := r.typ()
+ tag := r.String()
+ embedded := r.Bool()
+
+ fields[i] = types.NewField(pos, pkg, name, ftyp, embedded)
+ if tag != "" {
+ for len(tags) < i {
+ tags = append(tags, "")
+ }
+ tags = append(tags, tag)
+ }
+ }
+ return types.NewStruct(fields, tags)
+}
+
+func (r *reader) unionType() *types.Union {
+ terms := make([]*types.Term, r.Len())
+ for i := range terms {
+ terms[i] = types.NewTerm(r.Bool(), r.typ())
+ }
+ return types.NewUnion(terms)
+}
+
+func (r *reader) interfaceType() *types.Interface {
+ methods := make([]*types.Func, r.Len())
+ embeddeds := make([]types.Type, r.Len())
+ implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool()
+
+ for i := range methods {
+ pos := r.pos()
+ pkg, name := r.selector()
+ mtyp := r.signature(nil, nil, nil)
+ methods[i] = types.NewFunc(pos, pkg, name, mtyp)
+ }
+
+ for i := range embeddeds {
+ embeddeds[i] = r.typ()
+ }
+
+ iface := types.NewInterfaceType(methods, embeddeds)
+ if implicit {
+ iface.MarkImplicit()
+ }
+
+ // We need to call iface.Complete(), but if there are any embedded
+ // defined types, then we may not have set their underlying
+ // interface type yet. So we need to defer calling Complete until
+ // after we've called SetUnderlying everywhere.
+ //
+ // TODO(mdempsky): After CL 424876 lands, it should be safe to call
+ // iface.Complete() immediately.
+ r.p.ifaces = append(r.p.ifaces, iface)
+
+ return iface
+}
+
+func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature {
+ r.Sync(pkgbits.SyncSignature)
+
+ params := r.params()
+ results := r.params()
+ variadic := r.Bool()
+
+ return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic)
+}
+
+func (r *reader) params() *types.Tuple {
+ r.Sync(pkgbits.SyncParams)
+
+ params := make([]*types.Var, r.Len())
+ for i := range params {
+ params[i] = r.param()
+ }
+
+ return types.NewTuple(params...)
+}
+
+func (r *reader) param() *types.Var {
+ r.Sync(pkgbits.SyncParam)
+
+ pos := r.pos()
+ pkg, name := r.localIdent()
+ typ := r.typ()
+
+ return types.NewParam(pos, pkg, name, typ)
+}
+
+// @@@ Objects
+
+func (r *reader) obj() (types.Object, []types.Type) {
+ r.Sync(pkgbits.SyncObject)
+
+ if r.Version().Has(pkgbits.DerivedFuncInstance) {
+ assert(!r.Bool())
+ }
+
+ pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+ obj := pkgScope(pkg).Lookup(name)
+
+ targs := make([]types.Type, r.Len())
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+
+ return obj, targs
+}
+
+func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
+
+ var objPkg *types.Package
+ var objName string
+ var tag pkgbits.CodeObj
+ {
+ rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
+
+ objPkg, objName = rname.qualifiedIdent()
+ assert(objName != "")
+
+ tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
+ pr.retireReader(rname)
+ }
+
+ if tag == pkgbits.ObjStub {
+ assert(objPkg == nil || objPkg == types.Unsafe)
+ return objPkg, objName
+ }
+
+ // Ignore local types promoted to global scope (#55110).
+ if _, suffix := splitVargenSuffix(objName); suffix != "" {
+ return objPkg, objName
+ }
+
+ if objPkg.Scope().Lookup(objName) == nil {
+ dict := pr.objDictIdx(idx)
+
+ r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
+ r.dict = dict
+
+ declare := func(obj types.Object) {
+ objPkg.Scope().Insert(obj)
+ }
+
+ switch tag {
+ default:
+ panic("weird")
+
+ case pkgbits.ObjAlias:
+ pos := r.pos()
+ var tparams []*types.TypeParam
+ if r.Version().Has(pkgbits.AliasTypeParamNames) {
+ tparams = r.typeParamNames()
+ }
+ typ := r.typ()
+ declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams))
+
+ case pkgbits.ObjConst:
+ pos := r.pos()
+ typ := r.typ()
+ val := r.Value()
+ declare(types.NewConst(pos, objPkg, objName, typ, val))
+
+ case pkgbits.ObjFunc:
+ pos := r.pos()
+ tparams := r.typeParamNames()
+ sig := r.signature(nil, nil, tparams)
+ declare(types.NewFunc(pos, objPkg, objName, sig))
+
+ case pkgbits.ObjType:
+ pos := r.pos()
+
+ obj := types.NewTypeName(pos, objPkg, objName, nil)
+ named := types.NewNamed(obj, nil, nil)
+ declare(obj)
+
+ named.SetTypeParams(r.typeParamNames())
+
+ setUnderlying := func(underlying types.Type) {
+ // If the underlying type is an interface, we need to
+ // duplicate its methods so we can replace the receiver
+ // parameter's type (#49906).
+ if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
+ methods := make([]*types.Func, iface.NumExplicitMethods())
+ for i := range methods {
+ fn := iface.ExplicitMethod(i)
+ sig := fn.Type().(*types.Signature)
+
+ recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
+ typesinternal.SetVarKind(recv, typesinternal.RecvVar)
+ methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignatureType(recv, nil, nil, sig.Params(), sig.Results(), sig.Variadic()))
+ }
+
+ embeds := make([]types.Type, iface.NumEmbeddeds())
+ for i := range embeds {
+ embeds[i] = iface.EmbeddedType(i)
+ }
+
+ newIface := types.NewInterfaceType(methods, embeds)
+ r.p.ifaces = append(r.p.ifaces, newIface)
+ underlying = newIface
+ }
+
+ named.SetUnderlying(underlying)
+ }
+
+ // Since go.dev/cl/455279, we can assume rhs.Underlying() will
+ // always be non-nil. However, to temporarily support users of
+ // older snapshot releases, we continue to fallback to the old
+ // behavior for now.
+ //
+ // TODO(mdempsky): Remove fallback code and simplify after
+ // allowing time for snapshot users to upgrade.
+ rhs := r.typ()
+ if underlying := rhs.Underlying(); underlying != nil {
+ setUnderlying(underlying)
+ } else {
+ pk := r.p
+ pk.laterFor(named, func() {
+ // First be sure that the rhs is initialized, if it needs to be initialized.
+ delete(pk.laterFors, named) // prevent cycles
+ if i, ok := pk.laterFors[rhs]; ok {
+ f := pk.laterFns[i]
+ pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
+ f() // initialize RHS
+ }
+ setUnderlying(rhs.Underlying())
+ })
+ }
+
+ for i, n := 0, r.Len(); i < n; i++ {
+ named.AddMethod(r.method())
+ }
+
+ case pkgbits.ObjVar:
+ pos := r.pos()
+ typ := r.typ()
+ v := types.NewVar(pos, objPkg, objName, typ)
+ typesinternal.SetVarKind(v, typesinternal.PackageVar)
+ declare(v)
+ }
+ }
+
+ return objPkg, objName
+}
+
+func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
+
+ var dict readerDict
+
+ {
+ r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
+ if implicits := r.Len(); implicits != 0 {
+ errorf("unexpected object with %v implicit type parameter(s)", implicits)
+ }
+
+ dict.bounds = make([]typeInfo, r.Len())
+ for i := range dict.bounds {
+ dict.bounds[i] = r.typInfo()
+ }
+
+ dict.derived = make([]derivedInfo, r.Len())
+ dict.derivedTypes = make([]types.Type, len(dict.derived))
+ for i := range dict.derived {
+ dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)}
+ if r.Version().Has(pkgbits.DerivedInfoNeeded) {
+ assert(!r.Bool())
+ }
+ }
+
+ pr.retireReader(r)
+ }
+ // function references follow, but reader doesn't need those
+
+ return &dict
+}
+
+func (r *reader) typeParamNames() []*types.TypeParam {
+ r.Sync(pkgbits.SyncTypeParamNames)
+
+ // Note: This code assumes it only processes objects without
+ // implement type parameters. This is currently fine, because
+ // reader is only used to read in exported declarations, which are
+ // always package scoped.
+
+ if len(r.dict.bounds) == 0 {
+ return nil
+ }
+
+ // Careful: Type parameter lists may have cycles. To allow for this,
+ // we construct the type parameter list in two passes: first we
+ // create all the TypeNames and TypeParams, then we construct and
+ // set the bound type.
+
+ r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds))
+ for i := range r.dict.bounds {
+ pos := r.pos()
+ pkg, name := r.localIdent()
+
+ tname := types.NewTypeName(pos, pkg, name, nil)
+ r.dict.tparams[i] = types.NewTypeParam(tname, nil)
+ }
+
+ typs := make([]types.Type, len(r.dict.bounds))
+ for i, bound := range r.dict.bounds {
+ typs[i] = r.p.typIdx(bound, r.dict)
+ }
+
+ // TODO(mdempsky): This is subtle, elaborate further.
+ //
+ // We have to save tparams outside of the closure, because
+ // typeParamNames() can be called multiple times with the same
+ // dictionary instance.
+ //
+ // Also, this needs to happen later to make sure SetUnderlying has
+ // been called.
+ //
+ // TODO(mdempsky): Is it safe to have a single "later" slice or do
+ // we need to have multiple passes? See comments on CL 386002 and
+ // go.dev/issue/52104.
+ tparams := r.dict.tparams
+ r.p.later(func() {
+ for i, typ := range typs {
+ tparams[i].SetConstraint(typ)
+ }
+ })
+
+ return r.dict.tparams
+}
+
+func (r *reader) method() *types.Func {
+ r.Sync(pkgbits.SyncMethod)
+ pos := r.pos()
+ pkg, name := r.selector()
+
+ rparams := r.typeParamNames()
+ sig := r.signature(r.param(), rparams, nil)
+
+ _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go.
+ return types.NewFunc(pos, pkg, name, sig)
+}
+
+func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) }
+func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) }
+func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) }
+
+func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) {
+ r.Sync(marker)
+ return r.pkg(), r.String()
+}
+
+// pkgScope returns pkg.Scope().
+// If pkg is nil, it returns types.Universe instead.
+//
+// TODO(mdempsky): Remove after x/tools can depend on Go 1.19.
+func pkgScope(pkg *types.Package) *types.Scope {
+ if pkg != nil {
+ return pkg.Scope()
+ }
+ return types.Universe
+}
+
+// See cmd/compile/internal/types.SplitVargenSuffix.
+func splitVargenSuffix(name string) (base, suffix string) {
+ i := len(name)
+ for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
+ i--
+ }
+ const dot = "·"
+ if i >= len(dot) && name[i-len(dot):i] == dot {
+ i -= len(dot)
+ return name[:i], name[i:]
+ }
+ return name, ""
+}
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
new file mode 100644
index 0000000000..58721202de
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -0,0 +1,567 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gocommand is a helper for calling the go command.
+package gocommand
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/event/label"
+)
+
+// A Runner will run go command invocations and serialize
+// them if it sees a concurrency error.
+type Runner struct {
+ // once guards the runner initialization.
+ once sync.Once
+
+ // inFlight tracks available workers.
+ inFlight chan struct{}
+
+ // serialized guards the ability to run a go command serially,
+ // to avoid deadlocks when claiming workers.
+ serialized chan struct{}
+}
+
+const maxInFlight = 10
+
+func (runner *Runner) initialize() {
+ runner.once.Do(func() {
+ runner.inFlight = make(chan struct{}, maxInFlight)
+ runner.serialized = make(chan struct{}, 1)
+ })
+}
+
+// 1.13: go: updates to go.mod needed, but contents have changed
+// 1.14: go: updating go.mod: existing contents have changed since last read
+var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)
+
+// event keys for go command invocations
+var (
+ verb = keys.NewString("verb", "go command verb")
+ directory = keys.NewString("directory", "")
+)
+
+func invLabels(inv Invocation) []label.Label {
+ return []label.Label{verb.Of(inv.Verb), directory.Of(inv.WorkingDir)}
+}
+
+// Run is a convenience wrapper around RunRaw.
+// It returns only stdout and a "friendly" error.
+func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
+ ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...)
+ defer done()
+
+ stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
+ return stdout, friendly
+}
+
+// RunPiped runs the invocation serially, always waiting for any concurrent
+// invocations to complete first.
+func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
+ ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...)
+ defer done()
+
+ _, err := runner.runPiped(ctx, inv, stdout, stderr)
+ return err
+}
+
+// RunRaw runs the invocation, serializing requests only if they fight over
+// go.mod changes.
+// Postcondition: both error results have same nilness.
+func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
+ ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...)
+ defer done()
+ // Make sure the runner is always initialized.
+ runner.initialize()
+
+ // First, try to run the go command concurrently.
+ stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv)
+
+ // If we encounter a load concurrency error, we need to retry serially.
+ if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) {
+ event.Error(ctx, "Load concurrency error, will retry serially", err)
+
+ // Run serially by calling runPiped.
+ stdout.Reset()
+ stderr.Reset()
+ friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr)
+ }
+
+ return stdout, stderr, friendlyErr, err
+}
+
+// Postcondition: both error results have same nilness.
+func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
+ // Wait for 1 worker to become available.
+ select {
+ case <-ctx.Done():
+ return nil, nil, ctx.Err(), ctx.Err()
+ case runner.inFlight <- struct{}{}:
+ defer func() { <-runner.inFlight }()
+ }
+
+ stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
+ friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr)
+ return stdout, stderr, friendlyErr, err
+}
+
+// Postcondition: both error results have same nilness.
+func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) {
+ // Make sure the runner is always initialized.
+ runner.initialize()
+
+ // Acquire the serialization lock. This avoids deadlocks between two
+ // runPiped commands.
+ select {
+ case <-ctx.Done():
+ return ctx.Err(), ctx.Err()
+ case runner.serialized <- struct{}{}:
+ defer func() { <-runner.serialized }()
+ }
+
+ // Wait for all in-progress go commands to return before proceeding,
+ // to avoid load concurrency errors.
+ for range maxInFlight {
+ select {
+ case <-ctx.Done():
+ return ctx.Err(), ctx.Err()
+ case runner.inFlight <- struct{}{}:
+ // Make sure we always "return" any workers we took.
+ defer func() { <-runner.inFlight }()
+ }
+ }
+
+ return inv.runWithFriendlyError(ctx, stdout, stderr)
+}
+
+// An Invocation represents a call to the go command.
+type Invocation struct {
+ Verb string
+ Args []string
+ BuildFlags []string
+
+ // If ModFlag is set, the go command is invoked with -mod=ModFlag.
+ // TODO(rfindley): remove, in favor of Args.
+ ModFlag string
+
+ // If ModFile is set, the go command is invoked with -modfile=ModFile.
+ // TODO(rfindley): remove, in favor of Args.
+ ModFile string
+
+ // Overlay is the name of the JSON overlay file that describes
+ // unsaved editor buffers; see [WriteOverlays].
+ // If set, the go command is invoked with -overlay=Overlay.
+ // TODO(rfindley): remove, in favor of Args.
+ Overlay string
+
+ // If CleanEnv is set, the invocation will run only with the environment
+ // in Env, not starting with os.Environ.
+ CleanEnv bool
+ Env []string
+ WorkingDir string
+ Logf func(format string, args ...any)
+}
+
+// Postcondition: both error results have same nilness.
+func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) {
+ rawError = i.run(ctx, stdout, stderr)
+ if rawError != nil {
+ friendlyError = rawError
+ // Check for 'go' executable not being found.
+ if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
+ friendlyError = fmt.Errorf("go command required, not found: %v", ee)
+ }
+ if ctx.Err() != nil {
+ friendlyError = ctx.Err()
+ }
+ friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr)
+ }
+ return
+}
+
+// logf logs if i.Logf is non-nil.
+func (i *Invocation) logf(format string, args ...any) {
+ if i.Logf != nil {
+ i.Logf(format, args...)
+ }
+}
+
+func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
+ goArgs := []string{i.Verb}
+
+ appendModFile := func() {
+ if i.ModFile != "" {
+ goArgs = append(goArgs, "-modfile="+i.ModFile)
+ }
+ }
+ appendModFlag := func() {
+ if i.ModFlag != "" {
+ goArgs = append(goArgs, "-mod="+i.ModFlag)
+ }
+ }
+ appendOverlayFlag := func() {
+ if i.Overlay != "" {
+ goArgs = append(goArgs, "-overlay="+i.Overlay)
+ }
+ }
+
+ switch i.Verb {
+ case "env", "version":
+ goArgs = append(goArgs, i.Args...)
+ case "mod":
+ // mod needs the sub-verb before flags.
+ goArgs = append(goArgs, i.Args[0])
+ appendModFile()
+ goArgs = append(goArgs, i.Args[1:]...)
+ case "get":
+ goArgs = append(goArgs, i.BuildFlags...)
+ appendModFile()
+ goArgs = append(goArgs, i.Args...)
+
+ default: // notably list and build.
+ goArgs = append(goArgs, i.BuildFlags...)
+ appendModFile()
+ appendModFlag()
+ appendOverlayFlag()
+ goArgs = append(goArgs, i.Args...)
+ }
+ cmd := exec.Command("go", goArgs...)
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+
+ // https://go.dev/issue/59541: don't wait forever copying stderr
+ // after the command has exited.
+ // After CL 484741 we copy stdout manually, so we we'll stop reading that as
+ // soon as ctx is done. However, we also don't want to wait around forever
+ // for stderr. Give a much-longer-than-reasonable delay and then assume that
+ // something has wedged in the kernel or runtime.
+ cmd.WaitDelay = 30 * time.Second
+
+ // The cwd gets resolved to the real path. On Darwin, where
+ // /tmp is a symlink, this breaks anything that expects the
+ // working directory to keep the original path, including the
+ // go command when dealing with modules.
+ //
+ // os.Getwd has a special feature where if the cwd and the PWD
+ // are the same node then it trusts the PWD, so by setting it
+ // in the env for the child process we fix up all the paths
+ // returned by the go command.
+ if !i.CleanEnv {
+ cmd.Env = os.Environ()
+ }
+ cmd.Env = append(cmd.Env, i.Env...)
+ if i.WorkingDir != "" {
+ cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
+ cmd.Dir = i.WorkingDir
+ }
+
+ debugStr := cmdDebugStr(cmd)
+ i.logf("starting %v", debugStr)
+ start := time.Now()
+ defer func() {
+ i.logf("%s for %v", time.Since(start), debugStr)
+ }()
+
+ return runCmdContext(ctx, cmd)
+}
+
+// DebugHangingGoCommands may be set by tests to enable additional
+// instrumentation (including panics) for debugging hanging Go commands.
+//
+// See golang/go#54461 for details.
+var DebugHangingGoCommands = false
+
+// runCmdContext is like exec.CommandContext except it sends os.Interrupt
+// before os.Kill.
+func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
+ // If cmd.Stdout is not an *os.File, the exec package will create a pipe and
+ // copy it to the Writer in a goroutine until the process has finished and
+ // either the pipe reaches EOF or command's WaitDelay expires.
+ //
+ // However, the output from 'go list' can be quite large, and we don't want to
+ // keep reading (and allocating buffers) if we've already decided we don't
+ // care about the output. We don't want to wait for the process to finish, and
+ // we don't wait to wait for the WaitDelay to expire either.
+ //
+ // Instead, if cmd.Stdout requires a copying goroutine we explicitly replace
+ // it with a pipe (which is an *os.File), which we can close in order to stop
+ // copying output as soon as we realize we don't care about it.
+ var stdoutW *os.File
+ if cmd.Stdout != nil {
+ if _, ok := cmd.Stdout.(*os.File); !ok {
+ var stdoutR *os.File
+ stdoutR, stdoutW, err = os.Pipe()
+ if err != nil {
+ return err
+ }
+ prevStdout := cmd.Stdout
+ cmd.Stdout = stdoutW
+
+ stdoutErr := make(chan error, 1)
+ go func() {
+ _, err := io.Copy(prevStdout, stdoutR)
+ if err != nil {
+ err = fmt.Errorf("copying stdout: %w", err)
+ }
+ stdoutErr <- err
+ }()
+ defer func() {
+ // We started a goroutine to copy a stdout pipe.
+ // Wait for it to finish, or terminate it if need be.
+ var err2 error
+ select {
+ case err2 = <-stdoutErr:
+ stdoutR.Close()
+ case <-ctx.Done():
+ stdoutR.Close()
+ // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close
+ // should cause the Read call in io.Copy to unblock and return
+ // immediately, but we still need to receive from stdoutErr to confirm
+ // that it has happened.
+ <-stdoutErr
+ err2 = ctx.Err()
+ }
+ if err == nil {
+ err = err2
+ }
+ }()
+
+ // Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the
+ // same writer, and have a type that can be compared with ==, at most
+ // one goroutine at a time will call Write.”
+ //
+ // Since we're starting a goroutine that writes to cmd.Stdout, we must
+ // also update cmd.Stderr so that it still holds.
+ func() {
+ defer func() { recover() }()
+ if cmd.Stderr == prevStdout {
+ cmd.Stderr = cmd.Stdout
+ }
+ }()
+ }
+ }
+
+ startTime := time.Now()
+ err = cmd.Start()
+ if stdoutW != nil {
+ // The child process has inherited the pipe file,
+ // so close the copy held in this process.
+ stdoutW.Close()
+ stdoutW = nil
+ }
+ if err != nil {
+ return err
+ }
+
+ resChan := make(chan error, 1)
+ go func() {
+ resChan <- cmd.Wait()
+ }()
+
+ // If we're interested in debugging hanging Go commands, stop waiting after a
+ // minute and panic with interesting information.
+ debug := DebugHangingGoCommands
+ if debug {
+ timer := time.NewTimer(1 * time.Minute)
+ defer timer.Stop()
+ select {
+ case err := <-resChan:
+ return err
+ case <-timer.C:
+ // HandleHangingGoCommand terminates this process.
+ // Pass off resChan in case we can collect the command error.
+ handleHangingGoCommand(startTime, cmd, resChan)
+ case <-ctx.Done():
+ }
+ } else {
+ select {
+ case err := <-resChan:
+ return err
+ case <-ctx.Done():
+ }
+ }
+
+ // Cancelled. Interrupt and see if it ends voluntarily.
+ if err := cmd.Process.Signal(os.Interrupt); err == nil {
+ // (We used to wait only 1s but this proved
+ // fragile on loaded builder machines.)
+ timer := time.NewTimer(5 * time.Second)
+ defer timer.Stop()
+ select {
+ case err := <-resChan:
+ return err
+ case <-timer.C:
+ }
+ }
+
+ // Didn't shut down in response to interrupt. Kill it hard.
+ if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug {
+ log.Printf("error killing the Go command: %v", err)
+ }
+
+ return <-resChan
+}
+
+// handleHangingGoCommand outputs debugging information to help diagnose the
+// cause of a hanging Go command, and then exits with log.Fatalf.
+func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) {
+ switch runtime.GOOS {
+ case "linux", "darwin", "freebsd", "netbsd", "openbsd":
+ fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND
+
+ The gopls test runner has detected a hanging go command. In order to debug
+ this, the output of ps and lsof/fstat is printed below.
+
+ See golang/go#54461 for more details.`)
+
+ fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:")
+ fmt.Fprintln(os.Stderr, "-------------------------")
+ psCmd := exec.Command("ps", "axo", "ppid,pid,command")
+ psCmd.Stdout = os.Stderr
+ psCmd.Stderr = os.Stderr
+ if err := psCmd.Run(); err != nil {
+ log.Printf("Handling hanging Go command: running ps: %v", err)
+ }
+
+ listFiles := "lsof"
+ if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" {
+ listFiles = "fstat"
+ }
+
+ fmt.Fprintln(os.Stderr, "\n"+listFiles+":")
+ fmt.Fprintln(os.Stderr, "-----")
+ listFilesCmd := exec.Command(listFiles)
+ listFilesCmd.Stdout = os.Stderr
+ listFilesCmd.Stderr = os.Stderr
+ if err := listFilesCmd.Run(); err != nil {
+ log.Printf("Handling hanging Go command: running %s: %v", listFiles, err)
+ }
+ // Try to extract information about the slow go process by issuing a SIGQUIT.
+ if err := cmd.Process.Signal(sigStuckProcess); err == nil {
+ select {
+ case err := <-resChan:
+ stderr := "not a bytes.Buffer"
+ if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil {
+ stderr = buf.String()
+ }
+ log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr)
+ case <-time.After(5 * time.Second):
+ }
+ } else {
+ log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err)
+ }
+ }
+ log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)
+}
+
+func cmdDebugStr(cmd *exec.Cmd) string {
+ env := make(map[string]string)
+ for _, kv := range cmd.Env {
+ split := strings.SplitN(kv, "=", 2)
+ if len(split) == 2 {
+ k, v := split[0], split[1]
+ env[k] = v
+ }
+ }
+
+ var args []string
+ for _, arg := range cmd.Args {
+ quoted := strconv.Quote(arg)
+ if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") {
+ args = append(args, quoted)
+ } else {
+ args = append(args, arg)
+ }
+ }
+ return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
+}
+
+// WriteOverlays writes each value in the overlay (see the Overlay
+// field of go/packages.Config) to a temporary file and returns the name
+// of a JSON file describing the mapping that is suitable for the "go
+// list -overlay" flag.
+//
+// On success, the caller must call the cleanup function exactly once
+// when the files are no longer needed.
+func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(), err error) {
+ // Do nothing if there are no overlays in the config.
+ if len(overlay) == 0 {
+ return "", func() {}, nil
+ }
+
+ dir, err := os.MkdirTemp("", "gocommand-*")
+ if err != nil {
+ return "", nil, err
+ }
+
+ // The caller must clean up this directory,
+ // unless this function returns an error.
+ // (The cleanup operand of each return
+ // statement below is ignored.)
+ defer func() {
+ cleanup = func() {
+ os.RemoveAll(dir)
+ }
+ if err != nil {
+ cleanup()
+ cleanup = nil
+ }
+ }()
+
+ // Write each map entry to a temporary file.
+ overlays := make(map[string]string)
+ for k, v := range overlay {
+ // Use a unique basename for each file (001-foo.go),
+ // to avoid creating nested directories.
+ base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k))
+ filename := filepath.Join(dir, base)
+ err := os.WriteFile(filename, v, 0666)
+ if err != nil {
+ return "", nil, err
+ }
+ overlays[k] = filename
+ }
+
+ // Write the JSON overlay file that maps logical file names to temp files.
+ //
+ // OverlayJSON is the format overlay files are expected to be in.
+ // The Replace map maps from overlaid paths to replacement paths:
+ // the Go command will forward all reads trying to open
+ // each overlaid path to its replacement path, or consider the overlaid
+ // path not to exist if the replacement path is empty.
+ //
+ // From golang/go#39958.
+ type OverlayJSON struct {
+ Replace map[string]string `json:"replace,omitempty"`
+ }
+ b, err := json.Marshal(OverlayJSON{Replace: overlays})
+ if err != nil {
+ return "", nil, err
+ }
+ filename = filepath.Join(dir, "overlay.json")
+ if err := os.WriteFile(filename, b, 0666); err != nil {
+ return "", nil, err
+ }
+
+ return filename, nil, nil
+}
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
new file mode 100644
index 0000000000..469c648e4d
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !unix
+
+package gocommand
+
+import "os"
+
+// sigStuckProcess is the signal to send to kill a hanging subprocess.
+// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill.
+var sigStuckProcess = os.Kill
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
new file mode 100644
index 0000000000..169d37c8e9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package gocommand
+
+import "syscall"
+
+// Sigstuckprocess is the signal to send to kill a hanging subprocess.
+// Send SIGQUIT to get a stack trace.
+var sigStuckProcess = syscall.SIGQUIT
diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go
new file mode 100644
index 0000000000..e38d1fb488
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go
@@ -0,0 +1,163 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocommand
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "time"
+
+ "golang.org/x/mod/semver"
+)
+
+// ModuleJSON holds information about a module.
+type ModuleJSON struct {
+ Path string // module path
+ Version string // module version
+ Versions []string // available module versions (with -versions)
+ Replace *ModuleJSON // replaced by this module
+ Time *time.Time // time version was created
+ Update *ModuleJSON // available update, if any (with -u)
+ Main bool // is this the main module?
+ Indirect bool // is this module only an indirect dependency of main module?
+ Dir string // directory holding files for this module, if any
+ GoMod string // path to go.mod file used when loading this module, if any
+ GoVersion string // go version used in module
+}
+
+var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
+
+// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands
+// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields,
+// of which only Verb and Args are modified to run the appropriate Go command.
+// Inspired by setDefaultBuildMod in modload/init.go
+func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) {
+ mainMod, go114, err := getMainModuleAnd114(ctx, inv, r)
+ if err != nil {
+ return false, nil, err
+ }
+
+ // We check the GOFLAGS to see if there is anything overridden or not.
+ inv.Verb = "env"
+ inv.Args = []string{"GOFLAGS"}
+ stdout, err := r.Run(ctx, inv)
+ if err != nil {
+ return false, nil, err
+ }
+ goflags := string(bytes.TrimSpace(stdout.Bytes()))
+ matches := modFlagRegexp.FindStringSubmatch(goflags)
+ var modFlag string
+ if len(matches) != 0 {
+ modFlag = matches[1]
+ }
+ // Don't override an explicit '-mod=' argument.
+ if modFlag == "vendor" {
+ return true, mainMod, nil
+ } else if modFlag != "" {
+ return false, nil, nil
+ }
+ if mainMod == nil || !go114 {
+ return false, nil, nil
+ }
+ // Check 1.14's automatic vendor mode.
+ if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() {
+ if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 {
+ // The Go version is at least 1.14, and a vendor directory exists.
+ // Set -mod=vendor by default.
+ return true, mainMod, nil
+ }
+ }
+ return false, nil, nil
+}
+
+// getMainModuleAnd114 gets one of the main modules' information and whether the
+// go command in use is 1.14+. This is the information needed to figure out
+// if vendoring should be enabled.
+func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) {
+ const format = `{{.Path}}
+{{.Dir}}
+{{.GoMod}}
+{{.GoVersion}}
+{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}}
+`
+ inv.Verb = "list"
+ inv.Args = []string{"-m", "-f", format}
+ stdout, err := r.Run(ctx, inv)
+ if err != nil {
+ return nil, false, err
+ }
+
+ lines := strings.Split(stdout.String(), "\n")
+ if len(lines) < 5 {
+ return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String())
+ }
+ mod := &ModuleJSON{
+ Path: lines[0],
+ Dir: lines[1],
+ GoMod: lines[2],
+ GoVersion: lines[3],
+ Main: true,
+ }
+ return mod, lines[4] == "go1.14", nil
+}
+
+// WorkspaceVendorEnabled reports whether workspace vendoring is enabled. It takes a *Runner to execute Go commands
+// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields,
+// of which only Verb and Args are modified to run the appropriate Go command.
+// Inspired by setDefaultBuildMod in modload/init.go
+func WorkspaceVendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, []*ModuleJSON, error) {
+ inv.Verb = "env"
+ inv.Args = []string{"GOWORK"}
+ stdout, err := r.Run(ctx, inv)
+ if err != nil {
+ return false, nil, err
+ }
+ goWork := string(bytes.TrimSpace(stdout.Bytes()))
+ if fi, err := os.Stat(filepath.Join(filepath.Dir(goWork), "vendor")); err == nil && fi.IsDir() {
+ mainMods, err := getWorkspaceMainModules(ctx, inv, r)
+ if err != nil {
+ return false, nil, err
+ }
+ return true, mainMods, nil
+ }
+ return false, nil, nil
+}
+
+// getWorkspaceMainModules gets the main modules' information.
+// This is the information needed to figure out if vendoring should be enabled.
+func getWorkspaceMainModules(ctx context.Context, inv Invocation, r *Runner) ([]*ModuleJSON, error) {
+ const format = `{{.Path}}
+{{.Dir}}
+{{.GoMod}}
+{{.GoVersion}}
+`
+ inv.Verb = "list"
+ inv.Args = []string{"-m", "-f", format}
+ stdout, err := r.Run(ctx, inv)
+ if err != nil {
+ return nil, err
+ }
+
+ lines := strings.Split(strings.TrimSuffix(stdout.String(), "\n"), "\n")
+ if len(lines) < 4 {
+ return nil, fmt.Errorf("unexpected stdout: %q", stdout.String())
+ }
+ mods := make([]*ModuleJSON, 0, len(lines)/4)
+ for i := 0; i < len(lines); i += 4 {
+ mods = append(mods, &ModuleJSON{
+ Path: lines[i],
+ Dir: lines[i+1],
+ GoMod: lines[i+2],
+ GoVersion: lines[i+3],
+ Main: true,
+ })
+ }
+ return mods, nil
+}
diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go
new file mode 100644
index 0000000000..446c5846a6
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/version.go
@@ -0,0 +1,71 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocommand
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// GoVersion reports the minor version number of the highest release
+// tag built into the go command on the PATH.
+//
+// Note that this may be higher than the version of the go tool used
+// to build this application, and thus the versions of the standard
+// go/{scanner,parser,ast,types} packages that are linked into it.
+// In that case, callers should either downgrade to the version of
+// go used to build the application, or report an error that the
+// application is too old to use the go command on the PATH.
+func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
+ inv.Verb = "list"
+ inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`}
+ inv.BuildFlags = nil // This is not a build command.
+ inv.ModFlag = ""
+ inv.ModFile = ""
+ inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off")
+
+ stdoutBytes, err := r.Run(ctx, inv)
+ if err != nil {
+ return 0, err
+ }
+ stdout := stdoutBytes.String()
+ if len(stdout) < 3 {
+ return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout)
+ }
+ // Split up "[go1.1 go1.15]" and return highest go1.X value.
+ tags := strings.Fields(stdout[1 : len(stdout)-2])
+ for i := len(tags) - 1; i >= 0; i-- {
+ var version int
+ if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil {
+ continue
+ }
+ return version, nil
+ }
+ return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags)
+}
+
+// GoVersionOutput returns the complete output of the go version command.
+func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) {
+ inv.Verb = "version"
+ goVersion, err := r.Run(ctx, inv)
+ if err != nil {
+ return "", err
+ }
+ return goVersion.String(), nil
+}
+
+// ParseGoVersionOutput extracts the Go version string
+// from the output of the "go version" command.
+// Given an unrecognized form, it returns an empty string.
+func ParseGoVersionOutput(data string) string {
+ re := regexp.MustCompile(`^go version (go\S+|devel \S+)`)
+ m := re.FindStringSubmatch(data)
+ if len(m) != 2 {
+ return "" // unrecognized version
+ }
+ return m[1]
+}
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
new file mode 100644
index 0000000000..929b470beb
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -0,0 +1,23 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package packagesinternal exposes internal-only fields from go/packages.
+package packagesinternal
+
+import "fmt"
+
+var GetDepsErrors = func(p any) []*PackageError { return nil }
+
+type PackageError struct {
+ ImportStack []string // shortest path from package named on command line to this one
+ Pos string // position of error (if present, file:line:col)
+ Err string // the error itself
+}
+
+func (err PackageError) String() string {
+ return fmt.Sprintf("%s: %s (import stack: %s)", err.Pos, err.Err, err.ImportStack)
+}
+
+var TypecheckCgo int
+var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/codes.go b/vendor/golang.org/x/tools/internal/pkgbits/codes.go
new file mode 100644
index 0000000000..f0cabde96e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/codes.go
@@ -0,0 +1,77 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// A Code is an enum value that can be encoded into bitstreams.
+//
+// Code types are preferable for enum types, because they allow
+// Decoder to detect desyncs.
+type Code interface {
+ // Marker returns the SyncMarker for the Code's dynamic type.
+ Marker() SyncMarker
+
+ // Value returns the Code's ordinal value.
+ Value() int
+}
+
+// A CodeVal distinguishes among go/constant.Value encodings.
+type CodeVal int
+
+func (c CodeVal) Marker() SyncMarker { return SyncVal }
+func (c CodeVal) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ ValBool CodeVal = iota
+ ValString
+ ValInt64
+ ValBigInt
+ ValBigRat
+ ValBigFloat
+)
+
+// A CodeType distinguishes among go/types.Type encodings.
+type CodeType int
+
+func (c CodeType) Marker() SyncMarker { return SyncType }
+func (c CodeType) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ TypeBasic CodeType = iota
+ TypeNamed
+ TypePointer
+ TypeSlice
+ TypeArray
+ TypeChan
+ TypeMap
+ TypeSignature
+ TypeStruct
+ TypeInterface
+ TypeUnion
+ TypeTypeParam
+)
+
+// A CodeObj distinguishes among go/types.Object encodings.
+type CodeObj int
+
+func (c CodeObj) Marker() SyncMarker { return SyncCodeObj }
+func (c CodeObj) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ ObjAlias CodeObj = iota
+ ObjConst
+ ObjType
+ ObjFunc
+ ObjVar
+ ObjStub
+)
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
new file mode 100644
index 0000000000..c0aba26c48
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
@@ -0,0 +1,519 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "io"
+ "math/big"
+ "os"
+ "runtime"
+ "strings"
+)
+
+// A PkgDecoder provides methods for decoding a package's Unified IR
+// export data.
+type PkgDecoder struct {
+ // version is the file format version.
+ version Version
+
+ // sync indicates whether the file uses sync markers.
+ sync bool
+
+ // pkgPath is the package path for the package to be decoded.
+ //
+ // TODO(mdempsky): Remove; unneeded since CL 391014.
+ pkgPath string
+
+ // elemData is the full data payload of the encoded package.
+ // Elements are densely and contiguously packed together.
+ //
+ // The last 8 bytes of elemData are the package fingerprint.
+ elemData string
+
+ // elemEnds stores the byte-offset end positions of element
+ // bitstreams within elemData.
+ //
+ // For example, element I's bitstream data starts at elemEnds[I-1]
+ // (or 0, if I==0) and ends at elemEnds[I].
+ //
+ // Note: elemEnds is indexed by absolute indices, not
+ // section-relative indices.
+ elemEnds []uint32
+
+ // elemEndsEnds stores the index-offset end positions of relocation
+ // sections within elemEnds.
+ //
+ // For example, section K's end positions start at elemEndsEnds[K-1]
+ // (or 0, if K==0) and end at elemEndsEnds[K].
+ elemEndsEnds [numRelocs]uint32
+
+ scratchRelocEnt []RelocEnt
+}
+
+// PkgPath returns the package path for the package
+//
+// TODO(mdempsky): Remove; unneeded since CL 391014.
+func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath }
+
+// SyncMarkers reports whether pr uses sync markers.
+func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
+
+// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
+// IR export data from input. pkgPath is the package path for the
+// compilation unit that produced the export data.
+func NewPkgDecoder(pkgPath, input string) PkgDecoder {
+ pr := PkgDecoder{
+ pkgPath: pkgPath,
+ }
+
+ // TODO(mdempsky): Implement direct indexing of input string to
+ // avoid copying the position information.
+
+ r := strings.NewReader(input)
+
+ var ver uint32
+ assert(binary.Read(r, binary.LittleEndian, &ver) == nil)
+ pr.version = Version(ver)
+
+ if pr.version >= numVersions {
+ panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1))
+ }
+
+ if pr.version.Has(Flags) {
+ var flags uint32
+ assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
+ pr.sync = flags&flagSyncMarkers != 0
+ }
+
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
+
+ pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
+
+ pos, err := r.Seek(0, io.SeekCurrent)
+ assert(err == nil)
+
+ pr.elemData = input[pos:]
+
+ const fingerprintSize = 8
+ assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+ return pr
+}
+
+// NumElems returns the number of elements in section k.
+func (pr *PkgDecoder) NumElems(k RelocKind) int {
+ count := int(pr.elemEndsEnds[k])
+ if k > 0 {
+ count -= int(pr.elemEndsEnds[k-1])
+ }
+ return count
+}
+
+// TotalElems returns the total number of elements across all sections.
+func (pr *PkgDecoder) TotalElems() int {
+ return len(pr.elemEnds)
+}
+
+// Fingerprint returns the package fingerprint.
+func (pr *PkgDecoder) Fingerprint() [8]byte {
+ var fp [8]byte
+ copy(fp[:], pr.elemData[len(pr.elemData)-8:])
+ return fp
+}
+
+// AbsIdx returns the absolute index for the given (section, index)
+// pair.
+func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
+ absIdx := int(idx)
+ if k > 0 {
+ absIdx += int(pr.elemEndsEnds[k-1])
+ }
+ if absIdx >= int(pr.elemEndsEnds[k]) {
+ panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+ }
+ return absIdx
+}
+
+// DataIdx returns the raw element bitstream for the given (section,
+// index) pair.
+func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string {
+ absIdx := pr.AbsIdx(k, idx)
+
+ var start uint32
+ if absIdx > 0 {
+ start = pr.elemEnds[absIdx-1]
+ }
+ end := pr.elemEnds[absIdx]
+
+ return pr.elemData[start:end]
+}
+
+// StringIdx returns the string value for the given string index.
+func (pr *PkgDecoder) StringIdx(idx Index) string {
+ return pr.DataIdx(RelocString, idx)
+}
+
+// NewDecoder returns a Decoder for the given (section, index) pair,
+// and decodes the given SyncMarker from the element bitstream.
+func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
+ r := pr.NewDecoderRaw(k, idx)
+ r.Sync(marker)
+ return r
+}
+
+// TempDecoder returns a Decoder for the given (section, index) pair,
+// and decodes the given SyncMarker from the element bitstream.
+// If possible the Decoder should be RetireDecoder'd when it is no longer
+// needed, this will avoid heap allocations.
+func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
+ r := pr.TempDecoderRaw(k, idx)
+ r.Sync(marker)
+ return r
+}
+
+func (pr *PkgDecoder) RetireDecoder(d *Decoder) {
+ pr.scratchRelocEnt = d.Relocs
+ d.Relocs = nil
+}
+
+// NewDecoderRaw returns a Decoder for the given (section, index) pair.
+//
+// Most callers should use NewDecoder instead.
+func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
+ r := Decoder{
+ common: pr,
+ k: k,
+ Idx: idx,
+ }
+
+ r.Data.Reset(pr.DataIdx(k, idx))
+ r.Sync(SyncRelocs)
+ r.Relocs = make([]RelocEnt, r.Len())
+ for i := range r.Relocs {
+ r.Sync(SyncReloc)
+ r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+ }
+
+ return r
+}
+
+func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
+ r := Decoder{
+ common: pr,
+ k: k,
+ Idx: idx,
+ }
+
+ r.Data.Reset(pr.DataIdx(k, idx))
+ r.Sync(SyncRelocs)
+ l := r.Len()
+ if cap(pr.scratchRelocEnt) >= l {
+ r.Relocs = pr.scratchRelocEnt[:l]
+ pr.scratchRelocEnt = nil
+ } else {
+ r.Relocs = make([]RelocEnt, l)
+ }
+ for i := range r.Relocs {
+ r.Sync(SyncReloc)
+ r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+ }
+
+ return r
+}
+
+// A Decoder provides methods for decoding an individual element's
+// bitstream data.
+type Decoder struct {
+ common *PkgDecoder
+
+ Relocs []RelocEnt
+ Data strings.Reader
+
+ k RelocKind
+ Idx Index
+}
+
+func (r *Decoder) checkErr(err error) {
+ if err != nil {
+ panicf("unexpected decoding error: %w", err)
+ }
+}
+
+func (r *Decoder) rawUvarint() uint64 {
+ x, err := readUvarint(&r.Data)
+ r.checkErr(err)
+ return x
+}
+
+// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
+// This avoids the interface conversion and thus has better escape properties,
+// which flows up the stack.
+func readUvarint(r *strings.Reader) (uint64, error) {
+ var x uint64
+ var s uint
+ for i := range binary.MaxVarintLen64 {
+ b, err := r.ReadByte()
+ if err != nil {
+ if i > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return x, err
+ }
+ if b < 0x80 {
+ if i == binary.MaxVarintLen64-1 && b > 1 {
+ return x, overflow
+ }
+			return x | uint64(b)<<s, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+	return x, overflow
+}
+
+var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
+
+func (r *Decoder) rawVarint() int64 {
+	ux := r.rawUvarint()
+
+	// Zig-zag decode.
+	x := int64(ux >> 1)
+ if ux&1 != 0 {
+ x = ^x
+ }
+ return x
+}
+
+func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
+ e := r.Relocs[idx]
+ assert(e.Kind == k)
+ return e.Idx
+}
+
+// Sync decodes a sync marker from the element bitstream and asserts
+// that it matches the expected marker.
+//
+// If r.common.sync is false, then Sync is a no-op.
+func (r *Decoder) Sync(mWant SyncMarker) {
+ if !r.common.sync {
+ return
+ }
+
+ pos, _ := r.Data.Seek(0, io.SeekCurrent)
+ mHave := SyncMarker(r.rawUvarint())
+ writerPCs := make([]int, r.rawUvarint())
+ for i := range writerPCs {
+ writerPCs[i] = int(r.rawUvarint())
+ }
+
+ if mHave == mWant {
+ return
+ }
+
+ // There's some tension here between printing:
+ //
+ // (1) full file paths that tools can recognize (e.g., so emacs
+ // hyperlinks the "file:line" text for easy navigation), or
+ //
+ // (2) short file paths that are easier for humans to read (e.g., by
+ // omitting redundant or irrelevant details, so it's easier to
+ // focus on the useful bits that remain).
+ //
+ // The current formatting favors the former, as it seems more
+ // helpful in practice. But perhaps the formatting could be improved
+ // to better address both concerns. For example, use relative file
+ // paths if they would be shorter, or rewrite file paths to contain
+ // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+ // to reliably expand that again.
+
+ fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos)
+
+ fmt.Printf("\nfound %v, written at:\n", mHave)
+ if len(writerPCs) == 0 {
+ fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
+ }
+ for _, pc := range writerPCs {
+ fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc)))
+ }
+
+ fmt.Printf("\nexpected %v, reading at:\n", mWant)
+ var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
+ n := runtime.Callers(2, readerPCs[:])
+ for _, pc := range fmtFrames(readerPCs[:n]...) {
+ fmt.Printf("\t%s\n", pc)
+ }
+
+ // We already printed a stack trace for the reader, so now we can
+ // simply exit. Printing a second one with panic or base.Fatalf
+ // would just be noise.
+ os.Exit(1)
+}
+
+// Bool decodes and returns a bool value from the element bitstream.
+func (r *Decoder) Bool() bool {
+ r.Sync(SyncBool)
+ x, err := r.Data.ReadByte()
+ r.checkErr(err)
+ assert(x < 2)
+ return x != 0
+}
+
+// Int64 decodes and returns an int64 value from the element bitstream.
+func (r *Decoder) Int64() int64 {
+ r.Sync(SyncInt64)
+ return r.rawVarint()
+}
+
+// Uint64 decodes and returns a uint64 value from the element bitstream.
+func (r *Decoder) Uint64() uint64 {
+ r.Sync(SyncUint64)
+ return r.rawUvarint()
+}
+
+// Len decodes and returns a non-negative int value from the element bitstream.
+func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v }
+
+// Int decodes and returns an int value from the element bitstream.
+func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }
+
+// Uint decodes and returns a uint value from the element bitstream.
+func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v }
+
+// Code decodes a Code value from the element bitstream and returns
+// its ordinal value. It's the caller's responsibility to convert the
+// result to an appropriate Code type.
+//
+// TODO(mdempsky): Ideally this method would have signature "Code[T
+// Code] T" instead, but we don't allow generic methods and the
+// compiler can't depend on generics yet anyway.
+func (r *Decoder) Code(mark SyncMarker) int {
+ r.Sync(mark)
+ return r.Len()
+}
+
+// Reloc decodes a relocation of expected section k from the element
+// bitstream and returns an index to the referenced element.
+func (r *Decoder) Reloc(k RelocKind) Index {
+ r.Sync(SyncUseReloc)
+ return r.rawReloc(k, r.Len())
+}
+
+// String decodes and returns a string value from the element
+// bitstream.
+func (r *Decoder) String() string {
+ r.Sync(SyncString)
+ return r.common.StringIdx(r.Reloc(RelocString))
+}
+
+// Strings decodes and returns a variable-length slice of strings from
+// the element bitstream.
+func (r *Decoder) Strings() []string {
+ res := make([]string, r.Len())
+ for i := range res {
+ res[i] = r.String()
+ }
+ return res
+}
+
+// Value decodes and returns a constant.Value from the element
+// bitstream.
+func (r *Decoder) Value() constant.Value {
+ r.Sync(SyncValue)
+ isComplex := r.Bool()
+ val := r.scalar()
+ if isComplex {
+ val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))
+ }
+ return val
+}
+
+func (r *Decoder) scalar() constant.Value {
+ switch tag := CodeVal(r.Code(SyncVal)); tag {
+ default:
+ panic(fmt.Errorf("unexpected scalar tag: %v", tag))
+
+ case ValBool:
+ return constant.MakeBool(r.Bool())
+ case ValString:
+ return constant.MakeString(r.String())
+ case ValInt64:
+ return constant.MakeInt64(r.Int64())
+ case ValBigInt:
+ return constant.Make(r.bigInt())
+ case ValBigRat:
+ num := r.bigInt()
+ denom := r.bigInt()
+ return constant.Make(new(big.Rat).SetFrac(num, denom))
+ case ValBigFloat:
+ return constant.Make(r.bigFloat())
+ }
+}
+
+func (r *Decoder) bigInt() *big.Int {
+ v := new(big.Int).SetBytes([]byte(r.String()))
+ if r.Bool() {
+ v.Neg(v)
+ }
+ return v
+}
+
+func (r *Decoder) bigFloat() *big.Float {
+ v := new(big.Float).SetPrec(512)
+ assert(v.UnmarshalText([]byte(r.String())) == nil)
+ return v
+}
+
+// @@@ Helpers
+
+// TODO(mdempsky): These should probably be removed. I think they're a
+// smell that the export data format is not yet quite right.
+
+// PeekPkgPath returns the package path for the specified package
+// index.
+func (pr *PkgDecoder) PeekPkgPath(idx Index) string {
+ var path string
+ {
+ r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef)
+ path = r.String()
+ pr.RetireDecoder(&r)
+ }
+ if path == "" {
+ path = pr.pkgPath
+ }
+ return path
+}
+
+// PeekObj returns the package path, object name, and CodeObj for the
+// specified object index.
+func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
+ var ridx Index
+ var name string
+ var rcode int
+ {
+ r := pr.TempDecoder(RelocName, idx, SyncObject1)
+ r.Sync(SyncSym)
+ r.Sync(SyncPkg)
+ ridx = r.Reloc(RelocPkg)
+ name = r.String()
+ rcode = r.Code(SyncCodeObj)
+ pr.RetireDecoder(&r)
+ }
+
+ path := pr.PeekPkgPath(ridx)
+ assert(name != "")
+
+ tag := CodeObj(rcode)
+
+ return path, name, tag
+}
+
+// Version reports the version of the bitstream.
+func (w *Decoder) Version() Version { return w.common.version }
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/doc.go b/vendor/golang.org/x/tools/internal/pkgbits/doc.go
new file mode 100644
index 0000000000..c8a2796b5e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/doc.go
@@ -0,0 +1,32 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkgbits implements low-level coding abstractions for
+// Unified IR's export data format.
+//
+// At a low-level, a package is a collection of bitstream elements.
+// Each element has a "kind" and a dense, non-negative index.
+// Elements can be randomly accessed given their kind and index.
+//
+// Individual elements are sequences of variable-length values (e.g.,
+// integers, booleans, strings, go/constant values, cross-references
+// to other elements). Package pkgbits provides APIs for encoding and
+// decoding these low-level values, but the details of mapping
+// higher-level Go constructs into elements is left to higher-level
+// abstractions.
+//
+// Elements may cross-reference each other with "relocations." For
+// example, an element representing a pointer type has a relocation
+// referring to the element type.
+//
+// Go constructs may be composed as a constellation of multiple
+// elements. For example, a declared function may have one element to
+// describe the object (e.g., its name, type, position), and a
+// separate element to describe its function body. This allows readers
+// some flexibility in efficiently seeking or re-reading data (e.g.,
+// inlining requires re-reading the function body for each inlined
+// call, without needing to re-read the object-level details).
+//
+// This is a copy of internal/pkgbits in the Go implementation.
+package pkgbits
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
new file mode 100644
index 0000000000..c17a12399d
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
@@ -0,0 +1,392 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/binary"
+ "go/constant"
+ "io"
+ "math/big"
+ "runtime"
+ "strings"
+)
+
+// A PkgEncoder provides methods for encoding a package's Unified IR
+// export data.
+type PkgEncoder struct {
+ // version of the bitstream.
+ version Version
+
+ // elems holds the bitstream for previously encoded elements.
+ elems [numRelocs][]string
+
+ // stringsIdx maps previously encoded strings to their index within
+ // the RelocString section, to allow deduplication. That is,
+ // elems[RelocString][stringsIdx[s]] == s (if present).
+ stringsIdx map[string]Index
+
+ // syncFrames is the number of frames to write at each sync
+ // marker. A negative value means sync markers are omitted.
+ syncFrames int
+}
+
+// SyncMarkers reports whether pw uses sync markers.
+func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
+
+// NewPkgEncoder returns an initialized PkgEncoder.
+//
+// syncFrames is the number of caller frames that should be serialized
+// at Sync points. Serializing additional frames results in larger
+// export data files, but can help diagnosing desync errors in
+// higher-level Unified IR reader/writer code. If syncFrames is
+// negative, then sync markers are omitted entirely.
+func NewPkgEncoder(version Version, syncFrames int) PkgEncoder {
+ return PkgEncoder{
+ version: version,
+ stringsIdx: make(map[string]Index),
+ syncFrames: syncFrames,
+ }
+}
+
+// DumpTo writes the package's encoded data to out0 and returns the
+// package fingerprint.
+func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
+ h := md5.New()
+ out := io.MultiWriter(out0, h)
+
+ writeUint32 := func(x uint32) {
+ assert(binary.Write(out, binary.LittleEndian, x) == nil)
+ }
+
+ writeUint32(uint32(pw.version))
+
+ if pw.version.Has(Flags) {
+ var flags uint32
+ if pw.SyncMarkers() {
+ flags |= flagSyncMarkers
+ }
+ writeUint32(flags)
+ }
+
+ // Write elemEndsEnds.
+ var sum uint32
+ for _, elems := range &pw.elems {
+ sum += uint32(len(elems))
+ writeUint32(sum)
+ }
+
+ // Write elemEnds.
+ sum = 0
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ sum += uint32(len(elem))
+ writeUint32(sum)
+ }
+ }
+
+ // Write elemData.
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ _, err := io.WriteString(out, elem)
+ assert(err == nil)
+ }
+ }
+
+ // Write fingerprint.
+ copy(fingerprint[:], h.Sum(nil))
+ _, err := out0.Write(fingerprint[:])
+ assert(err == nil)
+
+ return
+}
+
+// StringIdx adds a string value to the strings section, if not
+// already present, and returns its index.
+func (pw *PkgEncoder) StringIdx(s string) Index {
+ if idx, ok := pw.stringsIdx[s]; ok {
+ assert(pw.elems[RelocString][idx] == s)
+ return idx
+ }
+
+ idx := Index(len(pw.elems[RelocString]))
+ pw.elems[RelocString] = append(pw.elems[RelocString], s)
+ pw.stringsIdx[s] = idx
+ return idx
+}
+
+// NewEncoder returns an Encoder for a new element within the given
+// section, and encodes the given SyncMarker as the start of the
+// element bitstream.
+func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder {
+ e := pw.NewEncoderRaw(k)
+ e.Sync(marker)
+ return e
+}
+
+// NewEncoderRaw returns an Encoder for a new element within the given
+// section.
+//
+// Most callers should use NewEncoder instead.
+func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder {
+ idx := Index(len(pw.elems[k]))
+ pw.elems[k] = append(pw.elems[k], "") // placeholder
+
+ return Encoder{
+ p: pw,
+ k: k,
+ Idx: idx,
+ }
+}
+
+// An Encoder provides methods for encoding an individual element's
+// bitstream data.
+type Encoder struct {
+ p *PkgEncoder
+
+ Relocs []RelocEnt
+ RelocMap map[RelocEnt]uint32
+ Data bytes.Buffer // accumulated element bitstream data
+
+ encodingRelocHeader bool
+
+ k RelocKind
+ Idx Index // index within relocation section
+}
+
+// Flush finalizes the element's bitstream and returns its Index.
+func (w *Encoder) Flush() Index {
+ var sb strings.Builder
+
+ // Backup the data so we write the relocations at the front.
+ var tmp bytes.Buffer
+ io.Copy(&tmp, &w.Data)
+
+ // TODO(mdempsky): Consider writing these out separately so they're
+ // easier to strip, along with function bodies, so that we can prune
+ // down to just the data that's relevant to go/types.
+ if w.encodingRelocHeader {
+ panic("encodingRelocHeader already true; recursive flush?")
+ }
+ w.encodingRelocHeader = true
+ w.Sync(SyncRelocs)
+ w.Len(len(w.Relocs))
+ for _, rEnt := range w.Relocs {
+ w.Sync(SyncReloc)
+ w.Len(int(rEnt.Kind))
+ w.Len(int(rEnt.Idx))
+ }
+
+ io.Copy(&sb, &w.Data)
+ io.Copy(&sb, &tmp)
+ w.p.elems[w.k][w.Idx] = sb.String()
+
+ return w.Idx
+}
+
+func (w *Encoder) checkErr(err error) {
+ if err != nil {
+ panicf("unexpected encoding error: %v", err)
+ }
+}
+
+func (w *Encoder) rawUvarint(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ _, err := w.Data.Write(buf[:n])
+ w.checkErr(err)
+}
+
+func (w *Encoder) rawVarint(x int64) {
+ // Zig-zag encode.
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+
+ w.rawUvarint(ux)
+}
+
+func (w *Encoder) rawReloc(r RelocKind, idx Index) int {
+ e := RelocEnt{r, idx}
+ if w.RelocMap != nil {
+ if i, ok := w.RelocMap[e]; ok {
+ return int(i)
+ }
+ } else {
+ w.RelocMap = make(map[RelocEnt]uint32)
+ }
+
+ i := len(w.Relocs)
+ w.RelocMap[e] = uint32(i)
+ w.Relocs = append(w.Relocs, e)
+ return i
+}
+
+func (w *Encoder) Sync(m SyncMarker) {
+ if !w.p.SyncMarkers() {
+ return
+ }
+
+ // Writing out stack frame string references requires working
+ // relocations, but writing out the relocations themselves involves
+ // sync markers. To prevent infinite recursion, we simply trim the
+ // stack frame for sync markers within the relocation header.
+ var frames []string
+ if !w.encodingRelocHeader && w.p.syncFrames > 0 {
+ pcs := make([]uintptr, w.p.syncFrames)
+ n := runtime.Callers(2, pcs)
+ frames = fmtFrames(pcs[:n]...)
+ }
+
+ // TODO(mdempsky): Save space by writing out stack frames as a
+ // linked list so we can share common stack frames.
+ w.rawUvarint(uint64(m))
+ w.rawUvarint(uint64(len(frames)))
+ for _, frame := range frames {
+ w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame))))
+ }
+}
+
+// Bool encodes and writes a bool value into the element bitstream,
+// and then returns the bool value.
+//
+// For simple, 2-alternative encodings, the idiomatic way to call Bool
+// is something like:
+//
+// if w.Bool(x != 0) {
+// // alternative #1
+// } else {
+// // alternative #2
+// }
+//
+// For multi-alternative encodings, use Code instead.
+func (w *Encoder) Bool(b bool) bool {
+ w.Sync(SyncBool)
+ var x byte
+ if b {
+ x = 1
+ }
+ err := w.Data.WriteByte(x)
+ w.checkErr(err)
+ return b
+}
+
+// Int64 encodes and writes an int64 value into the element bitstream.
+func (w *Encoder) Int64(x int64) {
+ w.Sync(SyncInt64)
+ w.rawVarint(x)
+}
+
+// Uint64 encodes and writes a uint64 value into the element bitstream.
+func (w *Encoder) Uint64(x uint64) {
+ w.Sync(SyncUint64)
+ w.rawUvarint(x)
+}
+
+// Len encodes and writes a non-negative int value into the element bitstream.
+func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
+
+// Int encodes and writes an int value into the element bitstream.
+func (w *Encoder) Int(x int) { w.Int64(int64(x)) }
+
+// Uint encodes and writes a uint value into the element bitstream.
+func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }
+
+// Reloc encodes and writes a relocation for the given (section,
+// index) pair into the element bitstream.
+//
+// Note: Only the index is formally written into the element
+// bitstream, so bitstream decoders must know from context which
+// section an encoded relocation refers to.
+func (w *Encoder) Reloc(r RelocKind, idx Index) {
+ w.Sync(SyncUseReloc)
+ w.Len(w.rawReloc(r, idx))
+}
+
+// Code encodes and writes a Code value into the element bitstream.
+func (w *Encoder) Code(c Code) {
+ w.Sync(c.Marker())
+ w.Len(c.Value())
+}
+
+// String encodes and writes a string value into the element
+// bitstream.
+//
+// Internally, strings are deduplicated by adding them to the strings
+// section (if not already present), and then writing a relocation
+// into the element bitstream.
+func (w *Encoder) String(s string) {
+ w.StringRef(w.p.StringIdx(s))
+}
+
+// StringRef writes a reference to the given index, which must be a
+// previously encoded string value.
+func (w *Encoder) StringRef(idx Index) {
+ w.Sync(SyncString)
+ w.Reloc(RelocString, idx)
+}
+
+// Strings encodes and writes a variable-length slice of strings into
+// the element bitstream.
+func (w *Encoder) Strings(ss []string) {
+ w.Len(len(ss))
+ for _, s := range ss {
+ w.String(s)
+ }
+}
+
+// Value encodes and writes a constant.Value into the element
+// bitstream.
+func (w *Encoder) Value(val constant.Value) {
+ w.Sync(SyncValue)
+ if w.Bool(val.Kind() == constant.Complex) {
+ w.scalar(constant.Real(val))
+ w.scalar(constant.Imag(val))
+ } else {
+ w.scalar(val)
+ }
+}
+
+func (w *Encoder) scalar(val constant.Value) {
+ switch v := constant.Val(val).(type) {
+ default:
+ panicf("unhandled %v (%v)", val, val.Kind())
+ case bool:
+ w.Code(ValBool)
+ w.Bool(v)
+ case string:
+ w.Code(ValString)
+ w.String(v)
+ case int64:
+ w.Code(ValInt64)
+ w.Int64(v)
+ case *big.Int:
+ w.Code(ValBigInt)
+ w.bigInt(v)
+ case *big.Rat:
+ w.Code(ValBigRat)
+ w.bigInt(v.Num())
+ w.bigInt(v.Denom())
+ case *big.Float:
+ w.Code(ValBigFloat)
+ w.bigFloat(v)
+ }
+}
+
+func (w *Encoder) bigInt(v *big.Int) {
+ b := v.Bytes()
+ w.String(string(b)) // TODO: More efficient encoding.
+ w.Bool(v.Sign() < 0)
+}
+
+func (w *Encoder) bigFloat(v *big.Float) {
+ b := v.Append(nil, 'p', -1)
+ w.String(string(b)) // TODO: More efficient encoding.
+}
+
+// Version reports the version of the bitstream.
+func (w *Encoder) Version() Version { return w.p.version }
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/flags.go b/vendor/golang.org/x/tools/internal/pkgbits/flags.go
new file mode 100644
index 0000000000..654222745f
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/flags.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+const (
+ flagSyncMarkers = 1 << iota // file format contains sync markers
+)
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/reloc.go b/vendor/golang.org/x/tools/internal/pkgbits/reloc.go
new file mode 100644
index 0000000000..fcdfb97ca9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/reloc.go
@@ -0,0 +1,42 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// A RelocKind indicates a particular section within a unified IR export.
+type RelocKind int32
+
+// An Index represents a bitstream element index within a particular
+// section.
+type Index int32
+
+// A relocEnt (relocation entry) is an entry in an element's local
+// reference table.
+//
+// TODO(mdempsky): Rename this too.
+type RelocEnt struct {
+ Kind RelocKind
+ Idx Index
+}
+
+// Reserved indices within the meta relocation section.
+const (
+ PublicRootIdx Index = 0
+ PrivateRootIdx Index = 1
+)
+
+const (
+ RelocString RelocKind = iota
+ RelocMeta
+ RelocPosBase
+ RelocPkg
+ RelocName
+ RelocType
+ RelocObj
+ RelocObjExt
+ RelocObjDict
+ RelocBody
+
+ numRelocs = iota
+)
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go
new file mode 100644
index 0000000000..50534a2955
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import "fmt"
+
+func assert(b bool) {
+ if !b {
+ panic("assertion failed")
+ }
+}
+
+func panicf(format string, args ...any) {
+ panic(fmt.Errorf(format, args...))
+}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
new file mode 100644
index 0000000000..1520b73afb
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
@@ -0,0 +1,136 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "fmt"
+ "runtime"
+ "strings"
+)
+
+// fmtFrames formats a backtrace for reporting reader/writer desyncs.
+func fmtFrames(pcs ...uintptr) []string {
+ res := make([]string, 0, len(pcs))
+ walkFrames(pcs, func(file string, line int, name string, offset uintptr) {
+ // Trim package from function name. It's just redundant noise.
+ name = strings.TrimPrefix(name, "cmd/compile/internal/noder.")
+
+ res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset))
+ })
+ return res
+}
+
+type frameVisitor func(file string, line int, name string, offset uintptr)
+
+// walkFrames calls visit for each call frame represented by pcs.
+//
+// pcs should be a slice of PCs, as returned by runtime.Callers.
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ if len(pcs) == 0 {
+ return
+ }
+
+ frames := runtime.CallersFrames(pcs)
+ for {
+ frame, more := frames.Next()
+ visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+ if !more {
+ return
+ }
+ }
+}
+
+// SyncMarker is an enum type that represents markers that may be
+// written to export data to ensure the reader and writer stay
+// synchronized.
+type SyncMarker int
+
+//go:generate stringer -type=SyncMarker -trimprefix=Sync
+
+const (
+ _ SyncMarker = iota
+
+ // Public markers (known to go/types importers).
+
+ // Low-level coding markers.
+ SyncEOF
+ SyncBool
+ SyncInt64
+ SyncUint64
+ SyncString
+ SyncValue
+ SyncVal
+ SyncRelocs
+ SyncReloc
+ SyncUseReloc
+
+ // Higher-level object and type markers.
+ SyncPublic
+ SyncPos
+ SyncPosBase
+ SyncObject
+ SyncObject1
+ SyncPkg
+ SyncPkgDef
+ SyncMethod
+ SyncType
+ SyncTypeIdx
+ SyncTypeParamNames
+ SyncSignature
+ SyncParams
+ SyncParam
+ SyncCodeObj
+ SyncSym
+ SyncLocalIdent
+ SyncSelector
+
+ // Private markers (only known to cmd/compile).
+ SyncPrivate
+
+ SyncFuncExt
+ SyncVarExt
+ SyncTypeExt
+ SyncPragma
+
+ SyncExprList
+ SyncExprs
+ SyncExpr
+ SyncExprType
+ SyncAssign
+ SyncOp
+ SyncFuncLit
+ SyncCompLit
+
+ SyncDecl
+ SyncFuncBody
+ SyncOpenScope
+ SyncCloseScope
+ SyncCloseAnotherScope
+ SyncDeclNames
+ SyncDeclName
+
+ SyncStmts
+ SyncBlockStmt
+ SyncIfStmt
+ SyncForStmt
+ SyncSwitchStmt
+ SyncRangeStmt
+ SyncCaseClause
+ SyncCommClause
+ SyncSelectStmt
+ SyncDecls
+ SyncLabeledStmt
+ SyncUseObjLocal
+ SyncAddLocal
+ SyncLinkname
+ SyncStmt1
+ SyncStmtsEnd
+ SyncLabel
+ SyncOptLabel
+
+ SyncMultiExpr
+ SyncRType
+ SyncConvRTTI
+)
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
new file mode 100644
index 0000000000..582ad56d3e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
@@ -0,0 +1,92 @@
+// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT.
+
+package pkgbits
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[SyncEOF-1]
+ _ = x[SyncBool-2]
+ _ = x[SyncInt64-3]
+ _ = x[SyncUint64-4]
+ _ = x[SyncString-5]
+ _ = x[SyncValue-6]
+ _ = x[SyncVal-7]
+ _ = x[SyncRelocs-8]
+ _ = x[SyncReloc-9]
+ _ = x[SyncUseReloc-10]
+ _ = x[SyncPublic-11]
+ _ = x[SyncPos-12]
+ _ = x[SyncPosBase-13]
+ _ = x[SyncObject-14]
+ _ = x[SyncObject1-15]
+ _ = x[SyncPkg-16]
+ _ = x[SyncPkgDef-17]
+ _ = x[SyncMethod-18]
+ _ = x[SyncType-19]
+ _ = x[SyncTypeIdx-20]
+ _ = x[SyncTypeParamNames-21]
+ _ = x[SyncSignature-22]
+ _ = x[SyncParams-23]
+ _ = x[SyncParam-24]
+ _ = x[SyncCodeObj-25]
+ _ = x[SyncSym-26]
+ _ = x[SyncLocalIdent-27]
+ _ = x[SyncSelector-28]
+ _ = x[SyncPrivate-29]
+ _ = x[SyncFuncExt-30]
+ _ = x[SyncVarExt-31]
+ _ = x[SyncTypeExt-32]
+ _ = x[SyncPragma-33]
+ _ = x[SyncExprList-34]
+ _ = x[SyncExprs-35]
+ _ = x[SyncExpr-36]
+ _ = x[SyncExprType-37]
+ _ = x[SyncAssign-38]
+ _ = x[SyncOp-39]
+ _ = x[SyncFuncLit-40]
+ _ = x[SyncCompLit-41]
+ _ = x[SyncDecl-42]
+ _ = x[SyncFuncBody-43]
+ _ = x[SyncOpenScope-44]
+ _ = x[SyncCloseScope-45]
+ _ = x[SyncCloseAnotherScope-46]
+ _ = x[SyncDeclNames-47]
+ _ = x[SyncDeclName-48]
+ _ = x[SyncStmts-49]
+ _ = x[SyncBlockStmt-50]
+ _ = x[SyncIfStmt-51]
+ _ = x[SyncForStmt-52]
+ _ = x[SyncSwitchStmt-53]
+ _ = x[SyncRangeStmt-54]
+ _ = x[SyncCaseClause-55]
+ _ = x[SyncCommClause-56]
+ _ = x[SyncSelectStmt-57]
+ _ = x[SyncDecls-58]
+ _ = x[SyncLabeledStmt-59]
+ _ = x[SyncUseObjLocal-60]
+ _ = x[SyncAddLocal-61]
+ _ = x[SyncLinkname-62]
+ _ = x[SyncStmt1-63]
+ _ = x[SyncStmtsEnd-64]
+ _ = x[SyncLabel-65]
+ _ = x[SyncOptLabel-66]
+ _ = x[SyncMultiExpr-67]
+ _ = x[SyncRType-68]
+ _ = x[SyncConvRTTI-69]
+}
+
+const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI"
+
+var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480}
+
+func (i SyncMarker) String() string {
+ i -= 1
+ if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) {
+ return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]]
+}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go
new file mode 100644
index 0000000000..53af9df22b
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go
@@ -0,0 +1,85 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// Version indicates a version of a unified IR bitstream.
+// Each Version indicates the addition, removal, or change of
+// new data in the bitstream.
+//
+// These are serialized to disk and the interpretation remains fixed.
+type Version uint32
+
+const (
+ // V0: initial prototype.
+ //
+ // All data that is not assigned a Field is in version V0
+ // and has not been deprecated.
+ V0 Version = iota
+
+ // V1: adds the Flags uint32 word
+ V1
+
+ // V2: removes unused legacy fields and supports type parameters for aliases.
+ // - remove the legacy "has init" bool from the public root
+ // - remove obj's "derived func instance" bool
+ // - add a TypeParamNames field to ObjAlias
+ // - remove derived info "needed" bool
+ V2
+
+ numVersions = iota
+)
+
+// Field denotes a unit of data in the serialized unified IR bitstream.
+// It is conceptually a like field in a structure.
+//
+// We only really need Fields when the data may or may not be present
+// in a stream based on the Version of the bitstream.
+//
+// Unlike much of pkgbits, Fields are not serialized and
+// can change values as needed.
+type Field int
+
+const (
+ // Flags in a uint32 in the header of a bitstream
+ // that is used to indicate whether optional features are enabled.
+ Flags Field = iota
+
+ // Deprecated: HasInit was a bool indicating whether a package
+ // has any init functions.
+ HasInit
+
+ // Deprecated: DerivedFuncInstance was a bool indicating
+ // whether an object was a function instance.
+ DerivedFuncInstance
+
+ // ObjAlias has a list of TypeParamNames.
+ AliasTypeParamNames
+
+ // Deprecated: DerivedInfoNeeded was a bool indicating
+ // whether a type was a derived type.
+ DerivedInfoNeeded
+
+ numFields = iota
+)
+
+// introduced is the version a field was added.
+var introduced = [numFields]Version{
+ Flags: V1,
+ AliasTypeParamNames: V2,
+}
+
+// removed is the version a field was removed in or 0 for fields
+// that have not yet been deprecated.
+// (So removed[f]-1 is the last version it is included in.)
+var removed = [numFields]Version{
+ HasInit: V2,
+ DerivedFuncInstance: V2,
+ DerivedInfoNeeded: V2,
+}
+
+// Has reports whether field f is present in a bitstream at version v.
+func (v Version) Has(f Field) bool {
+ return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
+}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go
new file mode 100644
index 0000000000..96ad6c5821
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go
@@ -0,0 +1,365 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package stdlib
+
+type pkginfo struct {
+ name string
+ deps string // list of indices of dependencies, as varint-encoded deltas
+}
+
+var deps = [...]pkginfo{
+ {"archive/tar", "\x03k\x03E;\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"},
+ {"archive/zip", "\x02\x04a\a\x03\x12\x021;\x01+\x05\x01\x0f\x03\x02\x0e\x04"},
+ {"bufio", "\x03k\x83\x01D\x14"},
+ {"bytes", "n*Y\x03\fG\x02\x02"},
+ {"cmp", ""},
+ {"compress/bzip2", "\x02\x02\xed\x01A"},
+ {"compress/flate", "\x02l\x03\x80\x01\f\x033\x01\x03"},
+ {"compress/gzip", "\x02\x04a\a\x03\x14lT"},
+ {"compress/lzw", "\x02l\x03\x80\x01"},
+ {"compress/zlib", "\x02\x04a\a\x03\x12\x01m"},
+ {"container/heap", "\xb3\x02"},
+ {"container/list", ""},
+ {"container/ring", ""},
+ {"context", "n\\m\x01\r"},
+ {"crypto", "\x83\x01nC"},
+ {"crypto/aes", "\x10\n\a\x93\x02"},
+ {"crypto/cipher", "\x03\x1e\x01\x01\x1e\x11\x1c+X"},
+ {"crypto/des", "\x10\x13\x1e-+\x9b\x01\x03"},
+ {"crypto/dsa", "A\x04)\x83\x01\r"},
+ {"crypto/ecdh", "\x03\v\f\x0e\x04\x15\x04\r\x1c\x83\x01"},
+ {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\a\v\x05\x01\x04\f\x01\x1c\x83\x01\r\x05K\x01"},
+ {"crypto/ed25519", "\x0e\x1c\x11\x06\n\a\x1c\x83\x01C"},
+ {"crypto/elliptic", "0>\x83\x01\r9"},
+ {"crypto/fips140", " \x05"},
+ {"crypto/hkdf", "-\x13\x01-\x15"},
+ {"crypto/hmac", "\x1a\x14\x12\x01\x111"},
+ {"crypto/internal/boring", "\x0e\x02\rf"},
+ {"crypto/internal/boring/bbig", "\x1a\xe4\x01M"},
+ {"crypto/internal/boring/bcache", "\xb8\x02\x13"},
+ {"crypto/internal/boring/sig", ""},
+ {"crypto/internal/cryptotest", "\x03\r\n\x06$\x0e\x19\x06\x12\x12 \x04\a\t\x16\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"},
+ {"crypto/internal/entropy", "F"},
+ {"crypto/internal/fips140", "?/\x15\xa7\x01\v\x16"},
+ {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x05\x01\x01\x05*\x92\x014"},
+ {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x05\x01\x06*\x8f\x01"},
+ {"crypto/internal/fips140/alias", "\xcb\x02"},
+ {"crypto/internal/fips140/bigmod", "%\x18\x01\x06*\x92\x01"},
+ {"crypto/internal/fips140/check", " \x0e\x06\t\x02\xb2\x01Z"},
+ {"crypto/internal/fips140/check/checktest", "%\x85\x02!"},
+ {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x05\b\x01(\x83\x01\x0f7"},
+ {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\r1\x83\x01\x0f7"},
+ {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068\x15nF"},
+ {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc6\x01\x03"},
+ {"crypto/internal/fips140/edwards25519", "%\a\f\x051\x92\x017"},
+ {"crypto/internal/fips140/edwards25519/field", "%\x13\x051\x92\x01"},
+ {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:\x15"},
+ {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018\x15"},
+ {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x051"},
+ {"crypto/internal/fips140/nistec", "%\f\a\x051\x92\x01*\r\x14"},
+ {"crypto/internal/fips140/nistec/fiat", "%\x136\x92\x01"},
+ {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:\x15"},
+ {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026\x15nF"},
+ {"crypto/internal/fips140/sha256", "\x03\x1d\x1d\x01\x06*\x15}"},
+ {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x05\x010\x92\x01K"},
+ {"crypto/internal/fips140/sha512", "\x03\x1d\x1d\x01\x06*\x15}"},
+ {"crypto/internal/fips140/ssh", "%^"},
+ {"crypto/internal/fips140/subtle", "#\x1a\xc3\x01"},
+ {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028\x15"},
+ {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\t1\x15"},
+ {"crypto/internal/fips140cache", "\xaa\x02\r&"},
+ {"crypto/internal/fips140deps", ""},
+ {"crypto/internal/fips140deps/byteorder", "\x99\x01"},
+ {"crypto/internal/fips140deps/cpu", "\xae\x01\a"},
+ {"crypto/internal/fips140deps/godebug", "\xb6\x01"},
+ {"crypto/internal/fips140hash", "5\x1b3\xc8\x01"},
+ {"crypto/internal/fips140only", "'\r\x01\x01M3;"},
+ {"crypto/internal/fips140test", ""},
+ {"crypto/internal/hpke", "\x0e\x01\x01\x03\x053#+gM"},
+ {"crypto/internal/impl", "\xb5\x02"},
+ {"crypto/internal/randutil", "\xf1\x01\x12"},
+ {"crypto/internal/sysrand", "nn! \r\r\x01\x01\f\x06"},
+ {"crypto/internal/sysrand/internal/seccomp", "n"},
+ {"crypto/md5", "\x0e3-\x15\x16g"},
+ {"crypto/mlkem", "/"},
+ {"crypto/pbkdf2", "2\x0e\x01-\x15"},
+ {"crypto/rand", "\x1a\x06\a\x1a\x04\x01(\x83\x01\rM"},
+ {"crypto/rc4", "#\x1e-\xc6\x01"},
+ {"crypto/rsa", "\x0e\f\x01\t\x0f\r\x01\x04\x06\a\x1c\x03\x123;\f\x01"},
+ {"crypto/sha1", "\x0e\f'\x03*\x15\x16\x15R"},
+ {"crypto/sha256", "\x0e\f\x1aO"},
+ {"crypto/sha3", "\x0e'N\xc8\x01"},
+ {"crypto/sha512", "\x0e\f\x1cM"},
+ {"crypto/subtle", "8\x9b\x01W"},
+ {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\a\x01\r\n\x01\t\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b;\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"},
+ {"crypto/tls/internal/fips140tls", "\x17\xa1\x02"},
+ {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x012\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x038\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\n\x01\x02\x0e\x02\x01\x01\x02\x03\x01"},
+ {"crypto/x509/pkix", "d\x06\a\x8d\x01G"},
+ {"database/sql", "\x03\nK\x16\x03\x80\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"},
+ {"database/sql/driver", "\ra\x03\xb4\x01\x0f\x11"},
+ {"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03e\x19\x02\x01+\x0f\x1f"},
+ {"debug/dwarf", "\x03d\a\x03\x80\x011\x11\x01\x01"},
+ {"debug/elf", "\x03\x06Q\r\a\x03e\x1a\x01,\x17\x01\x16"},
+ {"debug/gosym", "\x03d\n\xc2\x01\x01\x01\x02"},
+ {"debug/macho", "\x03\x06Q\r\ne\x1b,\x17\x01"},
+ {"debug/pe", "\x03\x06Q\r\a\x03e\x1b,\x17\x01\x16"},
+ {"debug/plan9obj", "g\a\x03e\x1b,"},
+ {"embed", "n*@\x19\x01S"},
+ {"embed/internal/embedtest", ""},
+ {"encoding", ""},
+ {"encoding/ascii85", "\xf1\x01C"},
+ {"encoding/asn1", "\x03k\x03\x8c\x01\x01'\r\x02\x01\x10\x03\x01"},
+ {"encoding/base32", "\xf1\x01A\x02"},
+ {"encoding/base64", "\x99\x01XA\x02"},
+ {"encoding/binary", "n\x83\x01\f(\r\x05"},
+ {"encoding/csv", "\x02\x01k\x03\x80\x01D\x12\x02"},
+ {"encoding/gob", "\x02`\x05\a\x03e\x1b\v\x01\x03\x1d\b\x12\x01\x0f\x02"},
+ {"encoding/hex", "n\x03\x80\x01A\x03"},
+ {"encoding/json", "\x03\x01^\x04\b\x03\x80\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"},
+ {"encoding/pem", "\x03c\b\x83\x01A\x03"},
+ {"encoding/xml", "\x02\x01_\f\x03\x80\x014\x05\n\x01\x02\x10\x02"},
+ {"errors", "\xca\x01\x81\x01"},
+ {"expvar", "kK?\b\v\x15\r\b\x02\x03\x01\x11"},
+ {"flag", "b\f\x03\x80\x01,\b\x05\b\x02\x01\x10"},
+ {"fmt", "nE>\f \b\r\x02\x03\x12"},
+ {"go/ast", "\x03\x01m\x0e\x01q\x03)\b\r\x02\x01"},
+ {"go/build", "\x02\x01k\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\t\x19\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"},
+ {"go/build/constraint", "n\xc6\x01\x01\x12\x02"},
+ {"go/constant", "q\x0f}\x01\x024\x01\x02\x12"},
+ {"go/doc", "\x04m\x01\x05\t>31\x10\x02\x01\x12\x02"},
+ {"go/doc/comment", "\x03n\xc1\x01\x01\x01\x01\x12\x02"},
+ {"go/format", "\x03n\x01\v\x01\x02qD"},
+ {"go/importer", "s\a\x01\x01\x04\x01p9"},
+ {"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x04\v\x01n\x02,\x01\x05\x11\x01\f\b"},
+ {"go/internal/gcimporter", "\x02o\x0f\x010\x05\x0e-,\x15\x03\x02"},
+ {"go/internal/srcimporter", "q\x01\x01\n\x03\x01p,\x01\x05\x12\x02\x14"},
+ {"go/parser", "\x03k\x03\x01\x02\v\x01q\x01+\x06\x12"},
+ {"go/printer", "q\x01\x02\x03\tq\f \x15\x02\x01\x02\v\x05\x02"},
+ {"go/scanner", "\x03n\x0fq2\x10\x01\x13\x02"},
+ {"go/token", "\x04m\x83\x01>\x02\x03\x01\x0f\x02"},
+ {"go/types", "\x03\x01\x06d\x03\x01\x03\b\x03\x02\x15\x1f\x061\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"},
+ {"go/version", "\xbb\x01z"},
+ {"hash", "\xf1\x01"},
+ {"hash/adler32", "n\x15\x16"},
+ {"hash/crc32", "n\x15\x16\x15\x89\x01\x01\x13"},
+ {"hash/crc64", "n\x15\x16\x9e\x01"},
+ {"hash/fnv", "n\x15\x16g"},
+ {"hash/maphash", "\x83\x01\x11!\x03\x93\x01"},
+ {"html", "\xb5\x02\x02\x12"},
+ {"html/template", "\x03h\x06\x18-;\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"},
+ {"image", "\x02l\x1ee\x0f4\x03\x01"},
+ {"image/color", ""},
+ {"image/color/palette", "\x8c\x01"},
+ {"image/draw", "\x8b\x01\x01\x04"},
+ {"image/gif", "\x02\x01\x05f\x03\x1a\x01\x01\x01\vX"},
+ {"image/internal/imageutil", "\x8b\x01"},
+ {"image/jpeg", "\x02l\x1d\x01\x04a"},
+ {"image/png", "\x02\a^\n\x12\x02\x06\x01eC"},
+ {"index/suffixarray", "\x03d\a\x83\x01\f+\n\x01"},
+ {"internal/abi", "\xb5\x01\x96\x01"},
+ {"internal/asan", "\xcb\x02"},
+ {"internal/bisect", "\xaa\x02\r\x01"},
+ {"internal/buildcfg", "qGe\x06\x02\x05\n\x01"},
+ {"internal/bytealg", "\xae\x01\x9d\x01"},
+ {"internal/byteorder", ""},
+ {"internal/cfg", ""},
+ {"internal/cgrouptest", "q[Q\x06\x0f\x02\x01\x04\x01"},
+ {"internal/chacha8rand", "\x99\x01\x15\a\x96\x01"},
+ {"internal/copyright", ""},
+ {"internal/coverage", ""},
+ {"internal/coverage/calloc", ""},
+ {"internal/coverage/cfile", "k\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x02$,\x06\a\n\x01\x03\r\x06"},
+ {"internal/coverage/cformat", "\x04m-\x04O\v6\x01\x02\r"},
+ {"internal/coverage/cmerge", "q-_"},
+ {"internal/coverage/decodecounter", "g\n-\v\x02F,\x17\x17"},
+ {"internal/coverage/decodemeta", "\x02e\n\x16\x17\v\x02F,"},
+ {"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02D\v!\x15"},
+ {"internal/coverage/encodemeta", "\x02\x01d\n\x12\x04\x17\r\x02D,."},
+ {"internal/coverage/pods", "\x04m-\x7f\x06\x05\n\x02\x01"},
+ {"internal/coverage/rtcov", "\xcb\x02"},
+ {"internal/coverage/slicereader", "g\n\x80\x01Z"},
+ {"internal/coverage/slicewriter", "q\x80\x01"},
+ {"internal/coverage/stringtab", "q8\x04D"},
+ {"internal/coverage/test", ""},
+ {"internal/coverage/uleb128", ""},
+ {"internal/cpu", "\xcb\x02"},
+ {"internal/dag", "\x04m\xc1\x01\x03"},
+ {"internal/diff", "\x03n\xc2\x01\x02"},
+ {"internal/exportdata", "\x02\x01k\x03\x02c\x1b,\x01\x05\x11\x01\x02"},
+ {"internal/filepathlite", "n*@\x1a@"},
+ {"internal/fmtsort", "\x04\xa1\x02\r"},
+ {"internal/fuzz", "\x03\nB\x18\x04\x03\x03\x01\v\x036;\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"},
+ {"internal/goarch", ""},
+ {"internal/godebug", "\x96\x01!\x80\x01\x01\x13"},
+ {"internal/godebugs", ""},
+ {"internal/goexperiment", ""},
+ {"internal/goos", ""},
+ {"internal/goroot", "\x9d\x02\x01\x05\x12\x02"},
+ {"internal/gover", "\x04"},
+ {"internal/goversion", ""},
+ {"internal/itoa", ""},
+ {"internal/lazyregexp", "\x9d\x02\v\r\x02"},
+ {"internal/lazytemplate", "\xf1\x01,\x18\x02\f"},
+ {"internal/msan", "\xcb\x02"},
+ {"internal/nettrace", ""},
+ {"internal/obscuretestdata", "f\x8b\x01,"},
+ {"internal/oserror", "n"},
+ {"internal/pkgbits", "\x03L\x18\a\x03\x04\vq\r\x1f\r\n\x01"},
+ {"internal/platform", ""},
+ {"internal/poll", "nO\x1f\x159\r\x01\x01\f\x06"},
+ {"internal/profile", "\x03\x04g\x03\x80\x017\v\x01\x01\x10"},
+ {"internal/profilerecord", ""},
+ {"internal/race", "\x94\x01\xb7\x01"},
+ {"internal/reflectlite", "\x94\x01!9\b\x13\x01\a\x03E;\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"},
+ {"net/http/cgi", "\x02Q\x1b\x03\x80\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"},
+ {"net/http/cookiejar", "\x04j\x03\x96\x01\x01\b\f\x16\x03\x02\x0e\x04"},
+ {"net/http/fcgi", "\x02\x01\nZ\a\x03\x80\x01\x16\x01\x01\x14\x18\x02\x0e"},
+ {"net/http/httptest", "\x02\x01\nF\x02\x1b\x01\x80\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"},
+ {"net/http/httptrace", "\rFnF\x14\n "},
+ {"net/http/httputil", "\x02\x01\na\x03\x80\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"},
+ {"net/http/internal", "\x02\x01k\x03\x80\x01"},
+ {"net/http/internal/ascii", "\xb5\x02\x12"},
+ {"net/http/internal/httpcommon", "\ra\x03\x9c\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"},
+ {"net/http/internal/testcert", "\xb5\x02"},
+ {"net/http/pprof", "\x02\x01\nd\x18-\x11*\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"},
+ {"net/internal/cgotest", ""},
+ {"net/internal/socktest", "q\xc6\x01\x02"},
+ {"net/mail", "\x02l\x03\x80\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"},
+ {"net/netip", "\x04j*\x01$@\x034\x16"},
+ {"net/rpc", "\x02g\x05\x03\x0f\ng\x04\x12\x01\x1d\r\x03\x02"},
+ {"net/rpc/jsonrpc", "k\x03\x03\x80\x01\x16\x11\x1f"},
+ {"net/smtp", "\x19/\v\x13\b\x03\x80\x01\x16\x14\x1a"},
+ {"net/textproto", "\x02\x01k\x03\x80\x01\f\n-\x01\x02\x14"},
+ {"net/url", "n\x03\x8b\x01&\x10\x02\x01\x16"},
+ {"os", "n*\x01\x19\x03\b\t\x12\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"},
+ {"os/exec", "\x03\naH%\x01\x15\x01+\x06\a\n\x01\x04\f"},
+ {"os/exec/internal/fdtest", "\xb9\x02"},
+ {"os/signal", "\r\x90\x02\x15\x05\x02"},
+ {"os/user", "\x02\x01k\x03\x80\x01,\r\n\x01\x02"},
+ {"path", "n*\xb1\x01"},
+ {"path/filepath", "n*\x1a@+\r\b\x03\x04\x10"},
+ {"plugin", "n"},
+ {"reflect", "n&\x04\x1d\b\f\x06\x04\x1b\x06\t-\n\x03\x10\x02\x02"},
+ {"reflect/internal/example1", ""},
+ {"reflect/internal/example2", ""},
+ {"regexp", "\x03\xee\x018\t\x02\x01\x02\x10\x02"},
+ {"regexp/syntax", "\xb2\x02\x01\x01\x01\x02\x10\x02"},
+ {"runtime", "\x94\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x02\x01\x01\x04\x10c"},
+ {"runtime/coverage", "\xa0\x01Q"},
+ {"runtime/debug", "qUW\r\b\x02\x01\x10\x06"},
+ {"runtime/metrics", "\xb7\x01F-!"},
+ {"runtime/pprof", "\x02\x01\x01\x03\x06Z\a\x03#4)\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"},
+ {"runtime/race", "\xb0\x02"},
+ {"runtime/race/internal/amd64v1", ""},
+ {"runtime/trace", "\ra\x03w\t9\b\x05\x01\r\x06"},
+ {"slices", "\x04\xf0\x01\fK"},
+ {"sort", "\xca\x0162"},
+ {"strconv", "n*@%\x03I"},
+ {"strings", "n&\x04@\x19\x03\f7\x10\x02\x02"},
+ {"structs", ""},
+ {"sync", "\xc9\x01\x10\x01P\x0e\x13"},
+ {"sync/atomic", "\xcb\x02"},
+ {"syscall", "n'\x03\x01\x1c\b\x03\x03\x06\vV\b\x05\x01\x13"},
+ {"testing", "\x03\na\x02\x01X\x14\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x02\x02"},
+ {"testing/fstest", "n\x03\x80\x01\x01\n&\x10\x03\b\b"},
+ {"testing/internal/testdeps", "\x02\v\xa7\x01-\x10,\x03\x05\x03\x06\a\x02\x0e"},
+ {"testing/iotest", "\x03k\x03\x80\x01\x04"},
+ {"testing/quick", "p\x01\x8c\x01\x05#\x10\x10"},
+ {"testing/slogtest", "\ra\x03\x86\x01.\x05\x10\v"},
+ {"testing/synctest", "\xda\x01`\x11"},
+ {"text/scanner", "\x03n\x80\x01,*\x02"},
+ {"text/tabwriter", "q\x80\x01X"},
+ {"text/template", "n\x03B>\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"},
+ {"text/template/parse", "\x03n\xb9\x01\n\x01\x12\x02"},
+ {"time", "n*\x1e\"(*\r\x02\x12"},
+ {"time/tzdata", "n\xcb\x01\x12"},
+ {"unicode", ""},
+ {"unicode/utf16", ""},
+ {"unicode/utf8", ""},
+ {"unique", "\x94\x01!#\x01Q\r\x01\x13\x12"},
+ {"unsafe", ""},
+ {"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x92\x01*&"},
+ {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xde\x01\x04\x01\a"},
+ {"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x8d\x01' \n"},
+ {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""},
+ {"vendor/golang.org/x/crypto/internal/alias", "\xcb\x02"},
+ {"vendor/golang.org/x/crypto/internal/poly1305", "R\x15\x99\x01"},
+ {"vendor/golang.org/x/net/dns/dnsmessage", "n"},
+ {"vendor/golang.org/x/net/http/httpguts", "\x87\x02\x14\x1a\x14\r"},
+ {"vendor/golang.org/x/net/http/httpproxy", "n\x03\x96\x01\x10\x05\x01\x18\x14\r"},
+ {"vendor/golang.org/x/net/http2/hpack", "\x03k\x03\x80\x01F"},
+ {"vendor/golang.org/x/net/idna", "q\x8c\x018\x14\x10\x02\x01"},
+ {"vendor/golang.org/x/net/nettest", "\x03d\a\x03\x80\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"},
+ {"vendor/golang.org/x/sys/cpu", "\x9d\x02\r\n\x01\x16"},
+ {"vendor/golang.org/x/text/secure/bidirule", "n\xdb\x01\x11\x01"},
+ {"vendor/golang.org/x/text/transform", "\x03k\x83\x01X"},
+ {"vendor/golang.org/x/text/unicode/bidi", "\x03\bf\x84\x01>\x16"},
+ {"vendor/golang.org/x/text/unicode/norm", "g\n\x80\x01F\x12\x11"},
+ {"weak", "\x94\x01\x96\x01!"},
+}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/import.go b/vendor/golang.org/x/tools/internal/stdlib/import.go
new file mode 100644
index 0000000000..f6909878a8
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/stdlib/import.go
@@ -0,0 +1,89 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stdlib
+
+// This file provides the API for the import graph of the standard library.
+//
+// Be aware that the compiler-generated code for every package
+// implicitly depends on package "runtime" and a handful of others
+// (see runtimePkgs in GOROOT/src/cmd/internal/objabi/pkgspecial.go).
+
+import (
+ "encoding/binary"
+ "iter"
+ "slices"
+ "strings"
+)
+
+// Imports returns the sequence of packages directly imported by the
+// named standard packages, in name order.
+// The imports of an unknown package are the empty set.
+//
+// The graph is built into the application and may differ from the
+// graph in the Go source tree being analyzed by the application.
+func Imports(pkgs ...string) iter.Seq[string] {
+ return func(yield func(string) bool) {
+ for _, pkg := range pkgs {
+ if i, ok := find(pkg); ok {
+ var depIndex uint64
+ for data := []byte(deps[i].deps); len(data) > 0; {
+ delta, n := binary.Uvarint(data)
+ depIndex += delta
+ if !yield(deps[depIndex].name) {
+ return
+ }
+ data = data[n:]
+ }
+ }
+ }
+ }
+}
+
+// Dependencies returns the set of all dependencies of the named
+// standard packages, including the initial package,
+// in a deterministic topological order.
+// The dependencies of an unknown package are the empty set.
+//
+// The graph is built into the application and may differ from the
+// graph in the Go source tree being analyzed by the application.
+func Dependencies(pkgs ...string) iter.Seq[string] {
+ return func(yield func(string) bool) {
+ for _, pkg := range pkgs {
+ if i, ok := find(pkg); ok {
+ var seen [1 + len(deps)/8]byte // bit set of seen packages
+ var visit func(i int) bool
+ visit = func(i int) bool {
+ bit := byte(1) << (i % 8)
+ if seen[i/8]&bit == 0 {
+ seen[i/8] |= bit
+ var depIndex uint64
+ for data := []byte(deps[i].deps); len(data) > 0; {
+ delta, n := binary.Uvarint(data)
+ depIndex += delta
+ if !visit(int(depIndex)) {
+ return false
+ }
+ data = data[n:]
+ }
+ if !yield(deps[i].name) {
+ return false
+ }
+ }
+ return true
+ }
+ if !visit(i) {
+ return
+ }
+ }
+ }
+ }
+}
+
+// find returns the index of pkg in the deps table.
+func find(pkg string) (int, bool) {
+ return slices.BinarySearchFunc(deps[:], pkg, func(p pkginfo, n string) int {
+ return strings.Compare(p.name, n)
+ })
+}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
new file mode 100644
index 0000000000..c1faa50d36
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -0,0 +1,17726 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package stdlib
+
+var PackageSymbols = map[string][]Symbol{
+ "archive/tar": {
+ {"(*Header).FileInfo", Method, 1, ""},
+ {"(*Reader).Next", Method, 0, ""},
+ {"(*Reader).Read", Method, 0, ""},
+ {"(*Writer).AddFS", Method, 22, ""},
+ {"(*Writer).Close", Method, 0, ""},
+ {"(*Writer).Flush", Method, 0, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"(*Writer).WriteHeader", Method, 0, ""},
+ {"(Format).String", Method, 10, ""},
+ {"ErrFieldTooLong", Var, 0, ""},
+ {"ErrHeader", Var, 0, ""},
+ {"ErrInsecurePath", Var, 20, ""},
+ {"ErrWriteAfterClose", Var, 0, ""},
+ {"ErrWriteTooLong", Var, 0, ""},
+ {"FileInfoHeader", Func, 1, "func(fi fs.FileInfo, link string) (*Header, error)"},
+ {"FileInfoNames", Type, 23, ""},
+ {"Format", Type, 10, ""},
+ {"FormatGNU", Const, 10, ""},
+ {"FormatPAX", Const, 10, ""},
+ {"FormatUSTAR", Const, 10, ""},
+ {"FormatUnknown", Const, 10, ""},
+ {"Header", Type, 0, ""},
+ {"Header.AccessTime", Field, 0, ""},
+ {"Header.ChangeTime", Field, 0, ""},
+ {"Header.Devmajor", Field, 0, ""},
+ {"Header.Devminor", Field, 0, ""},
+ {"Header.Format", Field, 10, ""},
+ {"Header.Gid", Field, 0, ""},
+ {"Header.Gname", Field, 0, ""},
+ {"Header.Linkname", Field, 0, ""},
+ {"Header.ModTime", Field, 0, ""},
+ {"Header.Mode", Field, 0, ""},
+ {"Header.Name", Field, 0, ""},
+ {"Header.PAXRecords", Field, 10, ""},
+ {"Header.Size", Field, 0, ""},
+ {"Header.Typeflag", Field, 0, ""},
+ {"Header.Uid", Field, 0, ""},
+ {"Header.Uname", Field, 0, ""},
+ {"Header.Xattrs", Field, 3, ""},
+ {"NewReader", Func, 0, "func(r io.Reader) *Reader"},
+ {"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+ {"Reader", Type, 0, ""},
+ {"TypeBlock", Const, 0, ""},
+ {"TypeChar", Const, 0, ""},
+ {"TypeCont", Const, 0, ""},
+ {"TypeDir", Const, 0, ""},
+ {"TypeFifo", Const, 0, ""},
+ {"TypeGNULongLink", Const, 1, ""},
+ {"TypeGNULongName", Const, 1, ""},
+ {"TypeGNUSparse", Const, 3, ""},
+ {"TypeLink", Const, 0, ""},
+ {"TypeReg", Const, 0, ""},
+ {"TypeRegA", Const, 0, ""},
+ {"TypeSymlink", Const, 0, ""},
+ {"TypeXGlobalHeader", Const, 0, ""},
+ {"TypeXHeader", Const, 0, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "archive/zip": {
+ {"(*File).DataOffset", Method, 2, ""},
+ {"(*File).FileInfo", Method, 0, ""},
+ {"(*File).ModTime", Method, 0, ""},
+ {"(*File).Mode", Method, 0, ""},
+ {"(*File).Open", Method, 0, ""},
+ {"(*File).OpenRaw", Method, 17, ""},
+ {"(*File).SetModTime", Method, 0, ""},
+ {"(*File).SetMode", Method, 0, ""},
+ {"(*FileHeader).FileInfo", Method, 0, ""},
+ {"(*FileHeader).ModTime", Method, 0, ""},
+ {"(*FileHeader).Mode", Method, 0, ""},
+ {"(*FileHeader).SetModTime", Method, 0, ""},
+ {"(*FileHeader).SetMode", Method, 0, ""},
+ {"(*ReadCloser).Close", Method, 0, ""},
+ {"(*ReadCloser).Open", Method, 16, ""},
+ {"(*ReadCloser).RegisterDecompressor", Method, 6, ""},
+ {"(*Reader).Open", Method, 16, ""},
+ {"(*Reader).RegisterDecompressor", Method, 6, ""},
+ {"(*Writer).AddFS", Method, 22, ""},
+ {"(*Writer).Close", Method, 0, ""},
+ {"(*Writer).Copy", Method, 17, ""},
+ {"(*Writer).Create", Method, 0, ""},
+ {"(*Writer).CreateHeader", Method, 0, ""},
+ {"(*Writer).CreateRaw", Method, 17, ""},
+ {"(*Writer).Flush", Method, 4, ""},
+ {"(*Writer).RegisterCompressor", Method, 6, ""},
+ {"(*Writer).SetComment", Method, 10, ""},
+ {"(*Writer).SetOffset", Method, 5, ""},
+ {"Compressor", Type, 2, ""},
+ {"Decompressor", Type, 2, ""},
+ {"Deflate", Const, 0, ""},
+ {"ErrAlgorithm", Var, 0, ""},
+ {"ErrChecksum", Var, 0, ""},
+ {"ErrFormat", Var, 0, ""},
+ {"ErrInsecurePath", Var, 20, ""},
+ {"File", Type, 0, ""},
+ {"File.FileHeader", Field, 0, ""},
+ {"FileHeader", Type, 0, ""},
+ {"FileHeader.CRC32", Field, 0, ""},
+ {"FileHeader.Comment", Field, 0, ""},
+ {"FileHeader.CompressedSize", Field, 0, ""},
+ {"FileHeader.CompressedSize64", Field, 1, ""},
+ {"FileHeader.CreatorVersion", Field, 0, ""},
+ {"FileHeader.ExternalAttrs", Field, 0, ""},
+ {"FileHeader.Extra", Field, 0, ""},
+ {"FileHeader.Flags", Field, 0, ""},
+ {"FileHeader.Method", Field, 0, ""},
+ {"FileHeader.Modified", Field, 10, ""},
+ {"FileHeader.ModifiedDate", Field, 0, ""},
+ {"FileHeader.ModifiedTime", Field, 0, ""},
+ {"FileHeader.Name", Field, 0, ""},
+ {"FileHeader.NonUTF8", Field, 10, ""},
+ {"FileHeader.ReaderVersion", Field, 0, ""},
+ {"FileHeader.UncompressedSize", Field, 0, ""},
+ {"FileHeader.UncompressedSize64", Field, 1, ""},
+ {"FileInfoHeader", Func, 0, "func(fi fs.FileInfo) (*FileHeader, error)"},
+ {"NewReader", Func, 0, "func(r io.ReaderAt, size int64) (*Reader, error)"},
+ {"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+ {"OpenReader", Func, 0, "func(name string) (*ReadCloser, error)"},
+ {"ReadCloser", Type, 0, ""},
+ {"ReadCloser.Reader", Field, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"Reader.Comment", Field, 0, ""},
+ {"Reader.File", Field, 0, ""},
+ {"RegisterCompressor", Func, 2, "func(method uint16, comp Compressor)"},
+ {"RegisterDecompressor", Func, 2, "func(method uint16, dcomp Decompressor)"},
+ {"Store", Const, 0, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "bufio": {
+ {"(*Reader).Buffered", Method, 0, ""},
+ {"(*Reader).Discard", Method, 5, ""},
+ {"(*Reader).Peek", Method, 0, ""},
+ {"(*Reader).Read", Method, 0, ""},
+ {"(*Reader).ReadByte", Method, 0, ""},
+ {"(*Reader).ReadBytes", Method, 0, ""},
+ {"(*Reader).ReadLine", Method, 0, ""},
+ {"(*Reader).ReadRune", Method, 0, ""},
+ {"(*Reader).ReadSlice", Method, 0, ""},
+ {"(*Reader).ReadString", Method, 0, ""},
+ {"(*Reader).Reset", Method, 2, ""},
+ {"(*Reader).Size", Method, 10, ""},
+ {"(*Reader).UnreadByte", Method, 0, ""},
+ {"(*Reader).UnreadRune", Method, 0, ""},
+ {"(*Reader).WriteTo", Method, 1, ""},
+ {"(*Scanner).Buffer", Method, 6, ""},
+ {"(*Scanner).Bytes", Method, 1, ""},
+ {"(*Scanner).Err", Method, 1, ""},
+ {"(*Scanner).Scan", Method, 1, ""},
+ {"(*Scanner).Split", Method, 1, ""},
+ {"(*Scanner).Text", Method, 1, ""},
+ {"(*Writer).Available", Method, 0, ""},
+ {"(*Writer).AvailableBuffer", Method, 18, ""},
+ {"(*Writer).Buffered", Method, 0, ""},
+ {"(*Writer).Flush", Method, 0, ""},
+ {"(*Writer).ReadFrom", Method, 1, ""},
+ {"(*Writer).Reset", Method, 2, ""},
+ {"(*Writer).Size", Method, 10, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"(*Writer).WriteByte", Method, 0, ""},
+ {"(*Writer).WriteRune", Method, 0, ""},
+ {"(*Writer).WriteString", Method, 0, ""},
+ {"(ReadWriter).Available", Method, 0, ""},
+ {"(ReadWriter).AvailableBuffer", Method, 18, ""},
+ {"(ReadWriter).Discard", Method, 5, ""},
+ {"(ReadWriter).Flush", Method, 0, ""},
+ {"(ReadWriter).Peek", Method, 0, ""},
+ {"(ReadWriter).Read", Method, 0, ""},
+ {"(ReadWriter).ReadByte", Method, 0, ""},
+ {"(ReadWriter).ReadBytes", Method, 0, ""},
+ {"(ReadWriter).ReadFrom", Method, 1, ""},
+ {"(ReadWriter).ReadLine", Method, 0, ""},
+ {"(ReadWriter).ReadRune", Method, 0, ""},
+ {"(ReadWriter).ReadSlice", Method, 0, ""},
+ {"(ReadWriter).ReadString", Method, 0, ""},
+ {"(ReadWriter).UnreadByte", Method, 0, ""},
+ {"(ReadWriter).UnreadRune", Method, 0, ""},
+ {"(ReadWriter).Write", Method, 0, ""},
+ {"(ReadWriter).WriteByte", Method, 0, ""},
+ {"(ReadWriter).WriteRune", Method, 0, ""},
+ {"(ReadWriter).WriteString", Method, 0, ""},
+ {"(ReadWriter).WriteTo", Method, 1, ""},
+ {"ErrAdvanceTooFar", Var, 1, ""},
+ {"ErrBadReadCount", Var, 15, ""},
+ {"ErrBufferFull", Var, 0, ""},
+ {"ErrFinalToken", Var, 6, ""},
+ {"ErrInvalidUnreadByte", Var, 0, ""},
+ {"ErrInvalidUnreadRune", Var, 0, ""},
+ {"ErrNegativeAdvance", Var, 1, ""},
+ {"ErrNegativeCount", Var, 0, ""},
+ {"ErrTooLong", Var, 1, ""},
+ {"MaxScanTokenSize", Const, 1, ""},
+ {"NewReadWriter", Func, 0, "func(r *Reader, w *Writer) *ReadWriter"},
+ {"NewReader", Func, 0, "func(rd io.Reader) *Reader"},
+ {"NewReaderSize", Func, 0, "func(rd io.Reader, size int) *Reader"},
+ {"NewScanner", Func, 1, "func(r io.Reader) *Scanner"},
+ {"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+ {"NewWriterSize", Func, 0, "func(w io.Writer, size int) *Writer"},
+ {"ReadWriter", Type, 0, ""},
+ {"ReadWriter.Reader", Field, 0, ""},
+ {"ReadWriter.Writer", Field, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"ScanBytes", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
+ {"ScanLines", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
+ {"ScanRunes", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
+ {"ScanWords", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
+ {"Scanner", Type, 1, ""},
+ {"SplitFunc", Type, 1, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "bytes": {
+ {"(*Buffer).Available", Method, 21, ""},
+ {"(*Buffer).AvailableBuffer", Method, 21, ""},
+ {"(*Buffer).Bytes", Method, 0, ""},
+ {"(*Buffer).Cap", Method, 5, ""},
+ {"(*Buffer).Grow", Method, 1, ""},
+ {"(*Buffer).Len", Method, 0, ""},
+ {"(*Buffer).Next", Method, 0, ""},
+ {"(*Buffer).Read", Method, 0, ""},
+ {"(*Buffer).ReadByte", Method, 0, ""},
+ {"(*Buffer).ReadBytes", Method, 0, ""},
+ {"(*Buffer).ReadFrom", Method, 0, ""},
+ {"(*Buffer).ReadRune", Method, 0, ""},
+ {"(*Buffer).ReadString", Method, 0, ""},
+ {"(*Buffer).Reset", Method, 0, ""},
+ {"(*Buffer).String", Method, 0, ""},
+ {"(*Buffer).Truncate", Method, 0, ""},
+ {"(*Buffer).UnreadByte", Method, 0, ""},
+ {"(*Buffer).UnreadRune", Method, 0, ""},
+ {"(*Buffer).Write", Method, 0, ""},
+ {"(*Buffer).WriteByte", Method, 0, ""},
+ {"(*Buffer).WriteRune", Method, 0, ""},
+ {"(*Buffer).WriteString", Method, 0, ""},
+ {"(*Buffer).WriteTo", Method, 0, ""},
+ {"(*Reader).Len", Method, 0, ""},
+ {"(*Reader).Read", Method, 0, ""},
+ {"(*Reader).ReadAt", Method, 0, ""},
+ {"(*Reader).ReadByte", Method, 0, ""},
+ {"(*Reader).ReadRune", Method, 0, ""},
+ {"(*Reader).Reset", Method, 7, ""},
+ {"(*Reader).Seek", Method, 0, ""},
+ {"(*Reader).Size", Method, 5, ""},
+ {"(*Reader).UnreadByte", Method, 0, ""},
+ {"(*Reader).UnreadRune", Method, 0, ""},
+ {"(*Reader).WriteTo", Method, 1, ""},
+ {"Buffer", Type, 0, ""},
+ {"Clone", Func, 20, "func(b []byte) []byte"},
+ {"Compare", Func, 0, "func(a []byte, b []byte) int"},
+ {"Contains", Func, 0, "func(b []byte, subslice []byte) bool"},
+ {"ContainsAny", Func, 7, "func(b []byte, chars string) bool"},
+ {"ContainsFunc", Func, 21, "func(b []byte, f func(rune) bool) bool"},
+ {"ContainsRune", Func, 7, "func(b []byte, r rune) bool"},
+ {"Count", Func, 0, "func(s []byte, sep []byte) int"},
+ {"Cut", Func, 18, "func(s []byte, sep []byte) (before []byte, after []byte, found bool)"},
+ {"CutPrefix", Func, 20, "func(s []byte, prefix []byte) (after []byte, found bool)"},
+ {"CutSuffix", Func, 20, "func(s []byte, suffix []byte) (before []byte, found bool)"},
+ {"Equal", Func, 0, "func(a []byte, b []byte) bool"},
+ {"EqualFold", Func, 0, "func(s []byte, t []byte) bool"},
+ {"ErrTooLarge", Var, 0, ""},
+ {"Fields", Func, 0, "func(s []byte) [][]byte"},
+ {"FieldsFunc", Func, 0, "func(s []byte, f func(rune) bool) [][]byte"},
+ {"FieldsFuncSeq", Func, 24, "func(s []byte, f func(rune) bool) iter.Seq[[]byte]"},
+ {"FieldsSeq", Func, 24, "func(s []byte) iter.Seq[[]byte]"},
+ {"HasPrefix", Func, 0, "func(s []byte, prefix []byte) bool"},
+ {"HasSuffix", Func, 0, "func(s []byte, suffix []byte) bool"},
+ {"Index", Func, 0, "func(s []byte, sep []byte) int"},
+ {"IndexAny", Func, 0, "func(s []byte, chars string) int"},
+ {"IndexByte", Func, 0, "func(b []byte, c byte) int"},
+ {"IndexFunc", Func, 0, "func(s []byte, f func(r rune) bool) int"},
+ {"IndexRune", Func, 0, "func(s []byte, r rune) int"},
+ {"Join", Func, 0, "func(s [][]byte, sep []byte) []byte"},
+ {"LastIndex", Func, 0, "func(s []byte, sep []byte) int"},
+ {"LastIndexAny", Func, 0, "func(s []byte, chars string) int"},
+ {"LastIndexByte", Func, 5, "func(s []byte, c byte) int"},
+ {"LastIndexFunc", Func, 0, "func(s []byte, f func(r rune) bool) int"},
+ {"Lines", Func, 24, "func(s []byte) iter.Seq[[]byte]"},
+ {"Map", Func, 0, "func(mapping func(r rune) rune, s []byte) []byte"},
+ {"MinRead", Const, 0, ""},
+ {"NewBuffer", Func, 0, "func(buf []byte) *Buffer"},
+ {"NewBufferString", Func, 0, "func(s string) *Buffer"},
+ {"NewReader", Func, 0, "func(b []byte) *Reader"},
+ {"Reader", Type, 0, ""},
+ {"Repeat", Func, 0, "func(b []byte, count int) []byte"},
+ {"Replace", Func, 0, "func(s []byte, old []byte, new []byte, n int) []byte"},
+ {"ReplaceAll", Func, 12, "func(s []byte, old []byte, new []byte) []byte"},
+ {"Runes", Func, 0, "func(s []byte) []rune"},
+ {"Split", Func, 0, "func(s []byte, sep []byte) [][]byte"},
+ {"SplitAfter", Func, 0, "func(s []byte, sep []byte) [][]byte"},
+ {"SplitAfterN", Func, 0, "func(s []byte, sep []byte, n int) [][]byte"},
+ {"SplitAfterSeq", Func, 24, "func(s []byte, sep []byte) iter.Seq[[]byte]"},
+ {"SplitN", Func, 0, "func(s []byte, sep []byte, n int) [][]byte"},
+ {"SplitSeq", Func, 24, "func(s []byte, sep []byte) iter.Seq[[]byte]"},
+ {"Title", Func, 0, "func(s []byte) []byte"},
+ {"ToLower", Func, 0, "func(s []byte) []byte"},
+ {"ToLowerSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
+ {"ToTitle", Func, 0, "func(s []byte) []byte"},
+ {"ToTitleSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
+ {"ToUpper", Func, 0, "func(s []byte) []byte"},
+ {"ToUpperSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
+ {"ToValidUTF8", Func, 13, "func(s []byte, replacement []byte) []byte"},
+ {"Trim", Func, 0, "func(s []byte, cutset string) []byte"},
+ {"TrimFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
+ {"TrimLeft", Func, 0, "func(s []byte, cutset string) []byte"},
+ {"TrimLeftFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
+ {"TrimPrefix", Func, 1, "func(s []byte, prefix []byte) []byte"},
+ {"TrimRight", Func, 0, "func(s []byte, cutset string) []byte"},
+ {"TrimRightFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
+ {"TrimSpace", Func, 0, "func(s []byte) []byte"},
+ {"TrimSuffix", Func, 1, "func(s []byte, suffix []byte) []byte"},
+ },
+ "cmp": {
+ {"Compare", Func, 21, "func[T Ordered](x T, y T) int"},
+ {"Less", Func, 21, "func[T Ordered](x T, y T) bool"},
+ {"Or", Func, 22, "func[T comparable](vals ...T) T"},
+ {"Ordered", Type, 21, ""},
+ },
+ "compress/bzip2": {
+ {"(StructuralError).Error", Method, 0, ""},
+ {"NewReader", Func, 0, "func(r io.Reader) io.Reader"},
+ {"StructuralError", Type, 0, ""},
+ },
+ "compress/flate": {
+ {"(*ReadError).Error", Method, 0, ""},
+ {"(*WriteError).Error", Method, 0, ""},
+ {"(*Writer).Close", Method, 0, ""},
+ {"(*Writer).Flush", Method, 0, ""},
+ {"(*Writer).Reset", Method, 2, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"(CorruptInputError).Error", Method, 0, ""},
+ {"(InternalError).Error", Method, 0, ""},
+ {"BestCompression", Const, 0, ""},
+ {"BestSpeed", Const, 0, ""},
+ {"CorruptInputError", Type, 0, ""},
+ {"DefaultCompression", Const, 0, ""},
+ {"HuffmanOnly", Const, 7, ""},
+ {"InternalError", Type, 0, ""},
+ {"NewReader", Func, 0, "func(r io.Reader) io.ReadCloser"},
+ {"NewReaderDict", Func, 0, "func(r io.Reader, dict []byte) io.ReadCloser"},
+ {"NewWriter", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
+ {"NewWriterDict", Func, 0, "func(w io.Writer, level int, dict []byte) (*Writer, error)"},
+ {"NoCompression", Const, 0, ""},
+ {"ReadError", Type, 0, ""},
+ {"ReadError.Err", Field, 0, ""},
+ {"ReadError.Offset", Field, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"Resetter", Type, 4, ""},
+ {"WriteError", Type, 0, ""},
+ {"WriteError.Err", Field, 0, ""},
+ {"WriteError.Offset", Field, 0, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "compress/gzip": {
+ {"(*Reader).Close", Method, 0, ""},
+ {"(*Reader).Multistream", Method, 4, ""},
+ {"(*Reader).Read", Method, 0, ""},
+ {"(*Reader).Reset", Method, 3, ""},
+ {"(*Writer).Close", Method, 0, ""},
+ {"(*Writer).Flush", Method, 1, ""},
+ {"(*Writer).Reset", Method, 2, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"BestCompression", Const, 0, ""},
+ {"BestSpeed", Const, 0, ""},
+ {"DefaultCompression", Const, 0, ""},
+ {"ErrChecksum", Var, 0, ""},
+ {"ErrHeader", Var, 0, ""},
+ {"Header", Type, 0, ""},
+ {"Header.Comment", Field, 0, ""},
+ {"Header.Extra", Field, 0, ""},
+ {"Header.ModTime", Field, 0, ""},
+ {"Header.Name", Field, 0, ""},
+ {"Header.OS", Field, 0, ""},
+ {"HuffmanOnly", Const, 8, ""},
+ {"NewReader", Func, 0, "func(r io.Reader) (*Reader, error)"},
+ {"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+ {"NewWriterLevel", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
+ {"NoCompression", Const, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"Reader.Header", Field, 0, ""},
+ {"Writer", Type, 0, ""},
+ {"Writer.Header", Field, 0, ""},
+ },
+ "compress/lzw": {
+ {"(*Reader).Close", Method, 17, ""},
+ {"(*Reader).Read", Method, 17, ""},
+ {"(*Reader).Reset", Method, 17, ""},
+ {"(*Writer).Close", Method, 17, ""},
+ {"(*Writer).Reset", Method, 17, ""},
+ {"(*Writer).Write", Method, 17, ""},
+ {"LSB", Const, 0, ""},
+ {"MSB", Const, 0, ""},
+ {"NewReader", Func, 0, "func(r io.Reader, order Order, litWidth int) io.ReadCloser"},
+ {"NewWriter", Func, 0, "func(w io.Writer, order Order, litWidth int) io.WriteCloser"},
+ {"Order", Type, 0, ""},
+ {"Reader", Type, 17, ""},
+ {"Writer", Type, 17, ""},
+ },
+ "compress/zlib": {
+ {"(*Writer).Close", Method, 0, ""},
+ {"(*Writer).Flush", Method, 0, ""},
+ {"(*Writer).Reset", Method, 2, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"BestCompression", Const, 0, ""},
+ {"BestSpeed", Const, 0, ""},
+ {"DefaultCompression", Const, 0, ""},
+ {"ErrChecksum", Var, 0, ""},
+ {"ErrDictionary", Var, 0, ""},
+ {"ErrHeader", Var, 0, ""},
+ {"HuffmanOnly", Const, 8, ""},
+ {"NewReader", Func, 0, "func(r io.Reader) (io.ReadCloser, error)"},
+ {"NewReaderDict", Func, 0, "func(r io.Reader, dict []byte) (io.ReadCloser, error)"},
+ {"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+ {"NewWriterLevel", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
+ {"NewWriterLevelDict", Func, 0, "func(w io.Writer, level int, dict []byte) (*Writer, error)"},
+ {"NoCompression", Const, 0, ""},
+ {"Resetter", Type, 4, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "container/heap": {
+ {"Fix", Func, 2, "func(h Interface, i int)"},
+ {"Init", Func, 0, "func(h Interface)"},
+ {"Interface", Type, 0, ""},
+ {"Pop", Func, 0, "func(h Interface) any"},
+ {"Push", Func, 0, "func(h Interface, x any)"},
+ {"Remove", Func, 0, "func(h Interface, i int) any"},
+ },
+ "container/list": {
+ {"(*Element).Next", Method, 0, ""},
+ {"(*Element).Prev", Method, 0, ""},
+ {"(*List).Back", Method, 0, ""},
+ {"(*List).Front", Method, 0, ""},
+ {"(*List).Init", Method, 0, ""},
+ {"(*List).InsertAfter", Method, 0, ""},
+ {"(*List).InsertBefore", Method, 0, ""},
+ {"(*List).Len", Method, 0, ""},
+ {"(*List).MoveAfter", Method, 2, ""},
+ {"(*List).MoveBefore", Method, 2, ""},
+ {"(*List).MoveToBack", Method, 0, ""},
+ {"(*List).MoveToFront", Method, 0, ""},
+ {"(*List).PushBack", Method, 0, ""},
+ {"(*List).PushBackList", Method, 0, ""},
+ {"(*List).PushFront", Method, 0, ""},
+ {"(*List).PushFrontList", Method, 0, ""},
+ {"(*List).Remove", Method, 0, ""},
+ {"Element", Type, 0, ""},
+ {"Element.Value", Field, 0, ""},
+ {"List", Type, 0, ""},
+ {"New", Func, 0, "func() *List"},
+ },
+ "container/ring": {
+ {"(*Ring).Do", Method, 0, ""},
+ {"(*Ring).Len", Method, 0, ""},
+ {"(*Ring).Link", Method, 0, ""},
+ {"(*Ring).Move", Method, 0, ""},
+ {"(*Ring).Next", Method, 0, ""},
+ {"(*Ring).Prev", Method, 0, ""},
+ {"(*Ring).Unlink", Method, 0, ""},
+ {"New", Func, 0, "func(n int) *Ring"},
+ {"Ring", Type, 0, ""},
+ {"Ring.Value", Field, 0, ""},
+ },
+ "context": {
+ {"AfterFunc", Func, 21, "func(ctx Context, f func()) (stop func() bool)"},
+ {"Background", Func, 7, "func() Context"},
+ {"CancelCauseFunc", Type, 20, ""},
+ {"CancelFunc", Type, 7, ""},
+ {"Canceled", Var, 7, ""},
+ {"Cause", Func, 20, "func(c Context) error"},
+ {"Context", Type, 7, ""},
+ {"DeadlineExceeded", Var, 7, ""},
+ {"TODO", Func, 7, "func() Context"},
+ {"WithCancel", Func, 7, "func(parent Context) (ctx Context, cancel CancelFunc)"},
+ {"WithCancelCause", Func, 20, "func(parent Context) (ctx Context, cancel CancelCauseFunc)"},
+ {"WithDeadline", Func, 7, "func(parent Context, d time.Time) (Context, CancelFunc)"},
+ {"WithDeadlineCause", Func, 21, "func(parent Context, d time.Time, cause error) (Context, CancelFunc)"},
+ {"WithTimeout", Func, 7, "func(parent Context, timeout time.Duration) (Context, CancelFunc)"},
+ {"WithTimeoutCause", Func, 21, "func(parent Context, timeout time.Duration, cause error) (Context, CancelFunc)"},
+ {"WithValue", Func, 7, "func(parent Context, key any, val any) Context"},
+ {"WithoutCancel", Func, 21, "func(parent Context) Context"},
+ },
+ "crypto": {
+ {"(Hash).Available", Method, 0, ""},
+ {"(Hash).HashFunc", Method, 4, ""},
+ {"(Hash).New", Method, 0, ""},
+ {"(Hash).Size", Method, 0, ""},
+ {"(Hash).String", Method, 15, ""},
+ {"BLAKE2b_256", Const, 9, ""},
+ {"BLAKE2b_384", Const, 9, ""},
+ {"BLAKE2b_512", Const, 9, ""},
+ {"BLAKE2s_256", Const, 9, ""},
+ {"Decrypter", Type, 5, ""},
+ {"DecrypterOpts", Type, 5, ""},
+ {"Hash", Type, 0, ""},
+ {"MD4", Const, 0, ""},
+ {"MD5", Const, 0, ""},
+ {"MD5SHA1", Const, 0, ""},
+ {"MessageSigner", Type, 25, ""},
+ {"PrivateKey", Type, 0, ""},
+ {"PublicKey", Type, 2, ""},
+ {"RIPEMD160", Const, 0, ""},
+ {"RegisterHash", Func, 0, "func(h Hash, f func() hash.Hash)"},
+ {"SHA1", Const, 0, ""},
+ {"SHA224", Const, 0, ""},
+ {"SHA256", Const, 0, ""},
+ {"SHA384", Const, 0, ""},
+ {"SHA3_224", Const, 4, ""},
+ {"SHA3_256", Const, 4, ""},
+ {"SHA3_384", Const, 4, ""},
+ {"SHA3_512", Const, 4, ""},
+ {"SHA512", Const, 0, ""},
+ {"SHA512_224", Const, 5, ""},
+ {"SHA512_256", Const, 5, ""},
+ {"SignMessage", Func, 25, "func(signer Signer, rand io.Reader, msg []byte, opts SignerOpts) (signature []byte, err error)"},
+ {"Signer", Type, 4, ""},
+ {"SignerOpts", Type, 4, ""},
+ },
+ "crypto/aes": {
+ {"(KeySizeError).Error", Method, 0, ""},
+ {"BlockSize", Const, 0, ""},
+ {"KeySizeError", Type, 0, ""},
+ {"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
+ },
+ "crypto/cipher": {
+ {"(StreamReader).Read", Method, 0, ""},
+ {"(StreamWriter).Close", Method, 0, ""},
+ {"(StreamWriter).Write", Method, 0, ""},
+ {"AEAD", Type, 2, ""},
+ {"Block", Type, 0, ""},
+ {"BlockMode", Type, 0, ""},
+ {"NewCBCDecrypter", Func, 0, "func(b Block, iv []byte) BlockMode"},
+ {"NewCBCEncrypter", Func, 0, "func(b Block, iv []byte) BlockMode"},
+ {"NewCFBDecrypter", Func, 0, "func(block Block, iv []byte) Stream"},
+ {"NewCFBEncrypter", Func, 0, "func(block Block, iv []byte) Stream"},
+ {"NewCTR", Func, 0, "func(block Block, iv []byte) Stream"},
+ {"NewGCM", Func, 2, "func(cipher Block) (AEAD, error)"},
+ {"NewGCMWithNonceSize", Func, 5, "func(cipher Block, size int) (AEAD, error)"},
+ {"NewGCMWithRandomNonce", Func, 24, "func(cipher Block) (AEAD, error)"},
+ {"NewGCMWithTagSize", Func, 11, "func(cipher Block, tagSize int) (AEAD, error)"},
+ {"NewOFB", Func, 0, "func(b Block, iv []byte) Stream"},
+ {"Stream", Type, 0, ""},
+ {"StreamReader", Type, 0, ""},
+ {"StreamReader.R", Field, 0, ""},
+ {"StreamReader.S", Field, 0, ""},
+ {"StreamWriter", Type, 0, ""},
+ {"StreamWriter.Err", Field, 0, ""},
+ {"StreamWriter.S", Field, 0, ""},
+ {"StreamWriter.W", Field, 0, ""},
+ },
+ "crypto/des": {
+ {"(KeySizeError).Error", Method, 0, ""},
+ {"BlockSize", Const, 0, ""},
+ {"KeySizeError", Type, 0, ""},
+ {"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
+ {"NewTripleDESCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
+ },
+ "crypto/dsa": {
+ {"ErrInvalidPublicKey", Var, 0, ""},
+ {"GenerateKey", Func, 0, "func(priv *PrivateKey, rand io.Reader) error"},
+ {"GenerateParameters", Func, 0, "func(params *Parameters, rand io.Reader, sizes ParameterSizes) error"},
+ {"L1024N160", Const, 0, ""},
+ {"L2048N224", Const, 0, ""},
+ {"L2048N256", Const, 0, ""},
+ {"L3072N256", Const, 0, ""},
+ {"ParameterSizes", Type, 0, ""},
+ {"Parameters", Type, 0, ""},
+ {"Parameters.G", Field, 0, ""},
+ {"Parameters.P", Field, 0, ""},
+ {"Parameters.Q", Field, 0, ""},
+ {"PrivateKey", Type, 0, ""},
+ {"PrivateKey.PublicKey", Field, 0, ""},
+ {"PrivateKey.X", Field, 0, ""},
+ {"PublicKey", Type, 0, ""},
+ {"PublicKey.Parameters", Field, 0, ""},
+ {"PublicKey.Y", Field, 0, ""},
+ {"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"},
+ {"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"},
+ },
+ "crypto/ecdh": {
+ {"(*PrivateKey).Bytes", Method, 20, ""},
+ {"(*PrivateKey).Curve", Method, 20, ""},
+ {"(*PrivateKey).ECDH", Method, 20, ""},
+ {"(*PrivateKey).Equal", Method, 20, ""},
+ {"(*PrivateKey).Public", Method, 20, ""},
+ {"(*PrivateKey).PublicKey", Method, 20, ""},
+ {"(*PublicKey).Bytes", Method, 20, ""},
+ {"(*PublicKey).Curve", Method, 20, ""},
+ {"(*PublicKey).Equal", Method, 20, ""},
+ {"Curve", Type, 20, ""},
+ {"P256", Func, 20, "func() Curve"},
+ {"P384", Func, 20, "func() Curve"},
+ {"P521", Func, 20, "func() Curve"},
+ {"PrivateKey", Type, 20, ""},
+ {"PublicKey", Type, 20, ""},
+ {"X25519", Func, 20, "func() Curve"},
+ },
+ "crypto/ecdsa": {
+ {"(*PrivateKey).Bytes", Method, 25, ""},
+ {"(*PrivateKey).ECDH", Method, 20, ""},
+ {"(*PrivateKey).Equal", Method, 15, ""},
+ {"(*PrivateKey).Public", Method, 4, ""},
+ {"(*PrivateKey).Sign", Method, 4, ""},
+ {"(*PublicKey).Bytes", Method, 25, ""},
+ {"(*PublicKey).ECDH", Method, 20, ""},
+ {"(*PublicKey).Equal", Method, 15, ""},
+ {"(PrivateKey).Add", Method, 0, ""},
+ {"(PrivateKey).Double", Method, 0, ""},
+ {"(PrivateKey).IsOnCurve", Method, 0, ""},
+ {"(PrivateKey).Params", Method, 0, ""},
+ {"(PrivateKey).ScalarBaseMult", Method, 0, ""},
+ {"(PrivateKey).ScalarMult", Method, 0, ""},
+ {"(PublicKey).Add", Method, 0, ""},
+ {"(PublicKey).Double", Method, 0, ""},
+ {"(PublicKey).IsOnCurve", Method, 0, ""},
+ {"(PublicKey).Params", Method, 0, ""},
+ {"(PublicKey).ScalarBaseMult", Method, 0, ""},
+ {"(PublicKey).ScalarMult", Method, 0, ""},
+ {"GenerateKey", Func, 0, "func(c elliptic.Curve, rand io.Reader) (*PrivateKey, error)"},
+ {"ParseRawPrivateKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PrivateKey, error)"},
+ {"ParseUncompressedPublicKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PublicKey, error)"},
+ {"PrivateKey", Type, 0, ""},
+ {"PrivateKey.D", Field, 0, ""},
+ {"PrivateKey.PublicKey", Field, 0, ""},
+ {"PublicKey", Type, 0, ""},
+ {"PublicKey.Curve", Field, 0, ""},
+ {"PublicKey.X", Field, 0, ""},
+ {"PublicKey.Y", Field, 0, ""},
+ {"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"},
+ {"SignASN1", Func, 15, "func(rand io.Reader, priv *PrivateKey, hash []byte) ([]byte, error)"},
+ {"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"},
+ {"VerifyASN1", Func, 15, "func(pub *PublicKey, hash []byte, sig []byte) bool"},
+ },
+ "crypto/ed25519": {
+ {"(*Options).HashFunc", Method, 20, ""},
+ {"(PrivateKey).Equal", Method, 15, ""},
+ {"(PrivateKey).Public", Method, 13, ""},
+ {"(PrivateKey).Seed", Method, 13, ""},
+ {"(PrivateKey).Sign", Method, 13, ""},
+ {"(PublicKey).Equal", Method, 15, ""},
+ {"GenerateKey", Func, 13, "func(rand io.Reader) (PublicKey, PrivateKey, error)"},
+ {"NewKeyFromSeed", Func, 13, "func(seed []byte) PrivateKey"},
+ {"Options", Type, 20, ""},
+ {"Options.Context", Field, 20, ""},
+ {"Options.Hash", Field, 20, ""},
+ {"PrivateKey", Type, 13, ""},
+ {"PrivateKeySize", Const, 13, ""},
+ {"PublicKey", Type, 13, ""},
+ {"PublicKeySize", Const, 13, ""},
+ {"SeedSize", Const, 13, ""},
+ {"Sign", Func, 13, "func(privateKey PrivateKey, message []byte) []byte"},
+ {"SignatureSize", Const, 13, ""},
+ {"Verify", Func, 13, "func(publicKey PublicKey, message []byte, sig []byte) bool"},
+ {"VerifyWithOptions", Func, 20, "func(publicKey PublicKey, message []byte, sig []byte, opts *Options) error"},
+ },
+ "crypto/elliptic": {
+ {"(*CurveParams).Add", Method, 0, ""},
+ {"(*CurveParams).Double", Method, 0, ""},
+ {"(*CurveParams).IsOnCurve", Method, 0, ""},
+ {"(*CurveParams).Params", Method, 0, ""},
+ {"(*CurveParams).ScalarBaseMult", Method, 0, ""},
+ {"(*CurveParams).ScalarMult", Method, 0, ""},
+ {"Curve", Type, 0, ""},
+ {"CurveParams", Type, 0, ""},
+ {"CurveParams.B", Field, 0, ""},
+ {"CurveParams.BitSize", Field, 0, ""},
+ {"CurveParams.Gx", Field, 0, ""},
+ {"CurveParams.Gy", Field, 0, ""},
+ {"CurveParams.N", Field, 0, ""},
+ {"CurveParams.Name", Field, 5, ""},
+ {"CurveParams.P", Field, 0, ""},
+ {"GenerateKey", Func, 0, "func(curve Curve, rand io.Reader) (priv []byte, x *big.Int, y *big.Int, err error)"},
+ {"Marshal", Func, 0, "func(curve Curve, x *big.Int, y *big.Int) []byte"},
+ {"MarshalCompressed", Func, 15, "func(curve Curve, x *big.Int, y *big.Int) []byte"},
+ {"P224", Func, 0, "func() Curve"},
+ {"P256", Func, 0, "func() Curve"},
+ {"P384", Func, 0, "func() Curve"},
+ {"P521", Func, 0, "func() Curve"},
+ {"Unmarshal", Func, 0, "func(curve Curve, data []byte) (x *big.Int, y *big.Int)"},
+ {"UnmarshalCompressed", Func, 15, "func(curve Curve, data []byte) (x *big.Int, y *big.Int)"},
+ },
+ "crypto/fips140": {
+ {"Enabled", Func, 24, "func() bool"},
+ },
+ "crypto/hkdf": {
+ {"Expand", Func, 24, "func[H hash.Hash](h func() H, pseudorandomKey []byte, info string, keyLength int) ([]byte, error)"},
+ {"Extract", Func, 24, "func[H hash.Hash](h func() H, secret []byte, salt []byte) ([]byte, error)"},
+ {"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, secret []byte, salt []byte, info string, keyLength int) ([]byte, error)"},
+ },
+ "crypto/hmac": {
+ {"Equal", Func, 1, "func(mac1 []byte, mac2 []byte) bool"},
+ {"New", Func, 0, "func(h func() hash.Hash, key []byte) hash.Hash"},
+ },
+ "crypto/md5": {
+ {"BlockSize", Const, 0, ""},
+ {"New", Func, 0, "func() hash.Hash"},
+ {"Size", Const, 0, ""},
+ {"Sum", Func, 2, "func(data []byte) [16]byte"},
+ },
+ "crypto/mlkem": {
+ {"(*DecapsulationKey1024).Bytes", Method, 24, ""},
+ {"(*DecapsulationKey1024).Decapsulate", Method, 24, ""},
+ {"(*DecapsulationKey1024).EncapsulationKey", Method, 24, ""},
+ {"(*DecapsulationKey768).Bytes", Method, 24, ""},
+ {"(*DecapsulationKey768).Decapsulate", Method, 24, ""},
+ {"(*DecapsulationKey768).EncapsulationKey", Method, 24, ""},
+ {"(*EncapsulationKey1024).Bytes", Method, 24, ""},
+ {"(*EncapsulationKey1024).Encapsulate", Method, 24, ""},
+ {"(*EncapsulationKey768).Bytes", Method, 24, ""},
+ {"(*EncapsulationKey768).Encapsulate", Method, 24, ""},
+ {"CiphertextSize1024", Const, 24, ""},
+ {"CiphertextSize768", Const, 24, ""},
+ {"DecapsulationKey1024", Type, 24, ""},
+ {"DecapsulationKey768", Type, 24, ""},
+ {"EncapsulationKey1024", Type, 24, ""},
+ {"EncapsulationKey768", Type, 24, ""},
+ {"EncapsulationKeySize1024", Const, 24, ""},
+ {"EncapsulationKeySize768", Const, 24, ""},
+ {"GenerateKey1024", Func, 24, "func() (*DecapsulationKey1024, error)"},
+ {"GenerateKey768", Func, 24, "func() (*DecapsulationKey768, error)"},
+ {"NewDecapsulationKey1024", Func, 24, "func(seed []byte) (*DecapsulationKey1024, error)"},
+ {"NewDecapsulationKey768", Func, 24, "func(seed []byte) (*DecapsulationKey768, error)"},
+ {"NewEncapsulationKey1024", Func, 24, "func(encapsulationKey []byte) (*EncapsulationKey1024, error)"},
+ {"NewEncapsulationKey768", Func, 24, "func(encapsulationKey []byte) (*EncapsulationKey768, error)"},
+ {"SeedSize", Const, 24, ""},
+ {"SharedKeySize", Const, 24, ""},
+ },
+ "crypto/pbkdf2": {
+ {"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, password string, salt []byte, iter int, keyLength int) ([]byte, error)"},
+ },
+ "crypto/rand": {
+ {"Int", Func, 0, "func(rand io.Reader, max *big.Int) (n *big.Int, err error)"},
+ {"Prime", Func, 0, "func(rand io.Reader, bits int) (*big.Int, error)"},
+ {"Read", Func, 0, "func(b []byte) (n int, err error)"},
+ {"Reader", Var, 0, ""},
+ {"Text", Func, 24, "func() string"},
+ },
+ "crypto/rc4": {
+ {"(*Cipher).Reset", Method, 0, ""},
+ {"(*Cipher).XORKeyStream", Method, 0, ""},
+ {"(KeySizeError).Error", Method, 0, ""},
+ {"Cipher", Type, 0, ""},
+ {"KeySizeError", Type, 0, ""},
+ {"NewCipher", Func, 0, "func(key []byte) (*Cipher, error)"},
+ },
+ "crypto/rsa": {
+ {"(*PSSOptions).HashFunc", Method, 4, ""},
+ {"(*PrivateKey).Decrypt", Method, 5, ""},
+ {"(*PrivateKey).Equal", Method, 15, ""},
+ {"(*PrivateKey).Precompute", Method, 0, ""},
+ {"(*PrivateKey).Public", Method, 4, ""},
+ {"(*PrivateKey).Sign", Method, 4, ""},
+ {"(*PrivateKey).Size", Method, 11, ""},
+ {"(*PrivateKey).Validate", Method, 0, ""},
+ {"(*PublicKey).Equal", Method, 15, ""},
+ {"(*PublicKey).Size", Method, 11, ""},
+ {"CRTValue", Type, 0, ""},
+ {"CRTValue.Coeff", Field, 0, ""},
+ {"CRTValue.Exp", Field, 0, ""},
+ {"CRTValue.R", Field, 0, ""},
+ {"DecryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) ([]byte, error)"},
+ {"DecryptPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error)"},
+ {"DecryptPKCS1v15SessionKey", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error"},
+ {"EncryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) ([]byte, error)"},
+ {"EncryptPKCS1v15", Func, 0, "func(random io.Reader, pub *PublicKey, msg []byte) ([]byte, error)"},
+ {"ErrDecryption", Var, 0, ""},
+ {"ErrMessageTooLong", Var, 0, ""},
+ {"ErrVerification", Var, 0, ""},
+ {"GenerateKey", Func, 0, "func(random io.Reader, bits int) (*PrivateKey, error)"},
+ {"GenerateMultiPrimeKey", Func, 0, "func(random io.Reader, nprimes int, bits int) (*PrivateKey, error)"},
+ {"OAEPOptions", Type, 5, ""},
+ {"OAEPOptions.Hash", Field, 5, ""},
+ {"OAEPOptions.Label", Field, 5, ""},
+ {"OAEPOptions.MGFHash", Field, 20, ""},
+ {"PKCS1v15DecryptOptions", Type, 5, ""},
+ {"PKCS1v15DecryptOptions.SessionKeyLen", Field, 5, ""},
+ {"PSSOptions", Type, 2, ""},
+ {"PSSOptions.Hash", Field, 4, ""},
+ {"PSSOptions.SaltLength", Field, 2, ""},
+ {"PSSSaltLengthAuto", Const, 2, ""},
+ {"PSSSaltLengthEqualsHash", Const, 2, ""},
+ {"PrecomputedValues", Type, 0, ""},
+ {"PrecomputedValues.CRTValues", Field, 0, ""},
+ {"PrecomputedValues.Dp", Field, 0, ""},
+ {"PrecomputedValues.Dq", Field, 0, ""},
+ {"PrecomputedValues.Qinv", Field, 0, ""},
+ {"PrivateKey", Type, 0, ""},
+ {"PrivateKey.D", Field, 0, ""},
+ {"PrivateKey.Precomputed", Field, 0, ""},
+ {"PrivateKey.Primes", Field, 0, ""},
+ {"PrivateKey.PublicKey", Field, 0, ""},
+ {"PublicKey", Type, 0, ""},
+ {"PublicKey.E", Field, 0, ""},
+ {"PublicKey.N", Field, 0, ""},
+ {"SignPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)"},
+ {"SignPSS", Func, 2, "func(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error)"},
+ {"VerifyPKCS1v15", Func, 0, "func(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) error"},
+ {"VerifyPSS", Func, 2, "func(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts *PSSOptions) error"},
+ },
+ "crypto/sha1": {
+ {"BlockSize", Const, 0, ""},
+ {"New", Func, 0, "func() hash.Hash"},
+ {"Size", Const, 0, ""},
+ {"Sum", Func, 2, "func(data []byte) [20]byte"},
+ },
+ "crypto/sha256": {
+ {"BlockSize", Const, 0, ""},
+ {"New", Func, 0, "func() hash.Hash"},
+ {"New224", Func, 0, "func() hash.Hash"},
+ {"Size", Const, 0, ""},
+ {"Size224", Const, 0, ""},
+ {"Sum224", Func, 2, "func(data []byte) [28]byte"},
+ {"Sum256", Func, 2, "func(data []byte) [32]byte"},
+ },
+ "crypto/sha3": {
+ {"(*SHA3).AppendBinary", Method, 24, ""},
+ {"(*SHA3).BlockSize", Method, 24, ""},
+ {"(*SHA3).Clone", Method, 25, ""},
+ {"(*SHA3).MarshalBinary", Method, 24, ""},
+ {"(*SHA3).Reset", Method, 24, ""},
+ {"(*SHA3).Size", Method, 24, ""},
+ {"(*SHA3).Sum", Method, 24, ""},
+ {"(*SHA3).UnmarshalBinary", Method, 24, ""},
+ {"(*SHA3).Write", Method, 24, ""},
+ {"(*SHAKE).AppendBinary", Method, 24, ""},
+ {"(*SHAKE).BlockSize", Method, 24, ""},
+ {"(*SHAKE).MarshalBinary", Method, 24, ""},
+ {"(*SHAKE).Read", Method, 24, ""},
+ {"(*SHAKE).Reset", Method, 24, ""},
+ {"(*SHAKE).UnmarshalBinary", Method, 24, ""},
+ {"(*SHAKE).Write", Method, 24, ""},
+ {"New224", Func, 24, "func() *SHA3"},
+ {"New256", Func, 24, "func() *SHA3"},
+ {"New384", Func, 24, "func() *SHA3"},
+ {"New512", Func, 24, "func() *SHA3"},
+ {"NewCSHAKE128", Func, 24, "func(N []byte, S []byte) *SHAKE"},
+ {"NewCSHAKE256", Func, 24, "func(N []byte, S []byte) *SHAKE"},
+ {"NewSHAKE128", Func, 24, "func() *SHAKE"},
+ {"NewSHAKE256", Func, 24, "func() *SHAKE"},
+ {"SHA3", Type, 24, ""},
+ {"SHAKE", Type, 24, ""},
+ {"Sum224", Func, 24, "func(data []byte) [28]byte"},
+ {"Sum256", Func, 24, "func(data []byte) [32]byte"},
+ {"Sum384", Func, 24, "func(data []byte) [48]byte"},
+ {"Sum512", Func, 24, "func(data []byte) [64]byte"},
+ {"SumSHAKE128", Func, 24, "func(data []byte, length int) []byte"},
+ {"SumSHAKE256", Func, 24, "func(data []byte, length int) []byte"},
+ },
+ "crypto/sha512": {
+ {"BlockSize", Const, 0, ""},
+ {"New", Func, 0, "func() hash.Hash"},
+ {"New384", Func, 0, "func() hash.Hash"},
+ {"New512_224", Func, 5, "func() hash.Hash"},
+ {"New512_256", Func, 5, "func() hash.Hash"},
+ {"Size", Const, 0, ""},
+ {"Size224", Const, 5, ""},
+ {"Size256", Const, 5, ""},
+ {"Size384", Const, 0, ""},
+ {"Sum384", Func, 2, "func(data []byte) [48]byte"},
+ {"Sum512", Func, 2, "func(data []byte) [64]byte"},
+ {"Sum512_224", Func, 5, "func(data []byte) [28]byte"},
+ {"Sum512_256", Func, 5, "func(data []byte) [32]byte"},
+ },
+ "crypto/subtle": {
+ {"ConstantTimeByteEq", Func, 0, "func(x uint8, y uint8) int"},
+ {"ConstantTimeCompare", Func, 0, "func(x []byte, y []byte) int"},
+ {"ConstantTimeCopy", Func, 0, "func(v int, x []byte, y []byte)"},
+ {"ConstantTimeEq", Func, 0, "func(x int32, y int32) int"},
+ {"ConstantTimeLessOrEq", Func, 2, "func(x int, y int) int"},
+ {"ConstantTimeSelect", Func, 0, "func(v int, x int, y int) int"},
+ {"WithDataIndependentTiming", Func, 24, "func(f func())"},
+ {"XORBytes", Func, 20, "func(dst []byte, x []byte, y []byte) int"},
+ },
+ "crypto/tls": {
+ {"(*CertificateRequestInfo).Context", Method, 17, ""},
+ {"(*CertificateRequestInfo).SupportsCertificate", Method, 14, ""},
+ {"(*CertificateVerificationError).Error", Method, 20, ""},
+ {"(*CertificateVerificationError).Unwrap", Method, 20, ""},
+ {"(*ClientHelloInfo).Context", Method, 17, ""},
+ {"(*ClientHelloInfo).SupportsCertificate", Method, 14, ""},
+ {"(*ClientSessionState).ResumptionState", Method, 21, ""},
+ {"(*Config).BuildNameToCertificate", Method, 0, ""},
+ {"(*Config).Clone", Method, 8, ""},
+ {"(*Config).DecryptTicket", Method, 21, ""},
+ {"(*Config).EncryptTicket", Method, 21, ""},
+ {"(*Config).SetSessionTicketKeys", Method, 5, ""},
+ {"(*Conn).Close", Method, 0, ""},
+ {"(*Conn).CloseWrite", Method, 8, ""},
+ {"(*Conn).ConnectionState", Method, 0, ""},
+ {"(*Conn).Handshake", Method, 0, ""},
+ {"(*Conn).HandshakeContext", Method, 17, ""},
+ {"(*Conn).LocalAddr", Method, 0, ""},
+ {"(*Conn).NetConn", Method, 18, ""},
+ {"(*Conn).OCSPResponse", Method, 0, ""},
+ {"(*Conn).Read", Method, 0, ""},
+ {"(*Conn).RemoteAddr", Method, 0, ""},
+ {"(*Conn).SetDeadline", Method, 0, ""},
+ {"(*Conn).SetReadDeadline", Method, 0, ""},
+ {"(*Conn).SetWriteDeadline", Method, 0, ""},
+ {"(*Conn).VerifyHostname", Method, 0, ""},
+ {"(*Conn).Write", Method, 0, ""},
+ {"(*ConnectionState).ExportKeyingMaterial", Method, 11, ""},
+ {"(*Dialer).Dial", Method, 15, ""},
+ {"(*Dialer).DialContext", Method, 15, ""},
+ {"(*ECHRejectionError).Error", Method, 23, ""},
+ {"(*QUICConn).Close", Method, 21, ""},
+ {"(*QUICConn).ConnectionState", Method, 21, ""},
+ {"(*QUICConn).HandleData", Method, 21, ""},
+ {"(*QUICConn).NextEvent", Method, 21, ""},
+ {"(*QUICConn).SendSessionTicket", Method, 21, ""},
+ {"(*QUICConn).SetTransportParameters", Method, 21, ""},
+ {"(*QUICConn).Start", Method, 21, ""},
+ {"(*QUICConn).StoreSession", Method, 23, ""},
+ {"(*SessionState).Bytes", Method, 21, ""},
+ {"(AlertError).Error", Method, 21, ""},
+ {"(ClientAuthType).String", Method, 15, ""},
+ {"(CurveID).String", Method, 15, ""},
+ {"(QUICEncryptionLevel).String", Method, 21, ""},
+ {"(RecordHeaderError).Error", Method, 6, ""},
+ {"(SignatureScheme).String", Method, 15, ""},
+ {"AlertError", Type, 21, ""},
+ {"Certificate", Type, 0, ""},
+ {"Certificate.Certificate", Field, 0, ""},
+ {"Certificate.Leaf", Field, 0, ""},
+ {"Certificate.OCSPStaple", Field, 0, ""},
+ {"Certificate.PrivateKey", Field, 0, ""},
+ {"Certificate.SignedCertificateTimestamps", Field, 5, ""},
+ {"Certificate.SupportedSignatureAlgorithms", Field, 14, ""},
+ {"CertificateRequestInfo", Type, 8, ""},
+ {"CertificateRequestInfo.AcceptableCAs", Field, 8, ""},
+ {"CertificateRequestInfo.SignatureSchemes", Field, 8, ""},
+ {"CertificateRequestInfo.Version", Field, 14, ""},
+ {"CertificateVerificationError", Type, 20, ""},
+ {"CertificateVerificationError.Err", Field, 20, ""},
+ {"CertificateVerificationError.UnverifiedCertificates", Field, 20, ""},
+ {"CipherSuite", Type, 14, ""},
+ {"CipherSuite.ID", Field, 14, ""},
+ {"CipherSuite.Insecure", Field, 14, ""},
+ {"CipherSuite.Name", Field, 14, ""},
+ {"CipherSuite.SupportedVersions", Field, 14, ""},
+ {"CipherSuiteName", Func, 14, "func(id uint16) string"},
+ {"CipherSuites", Func, 14, "func() []*CipherSuite"},
+ {"Client", Func, 0, "func(conn net.Conn, config *Config) *Conn"},
+ {"ClientAuthType", Type, 0, ""},
+ {"ClientHelloInfo", Type, 4, ""},
+ {"ClientHelloInfo.CipherSuites", Field, 4, ""},
+ {"ClientHelloInfo.Conn", Field, 8, ""},
+ {"ClientHelloInfo.Extensions", Field, 24, ""},
+ {"ClientHelloInfo.ServerName", Field, 4, ""},
+ {"ClientHelloInfo.SignatureSchemes", Field, 8, ""},
+ {"ClientHelloInfo.SupportedCurves", Field, 4, ""},
+ {"ClientHelloInfo.SupportedPoints", Field, 4, ""},
+ {"ClientHelloInfo.SupportedProtos", Field, 8, ""},
+ {"ClientHelloInfo.SupportedVersions", Field, 8, ""},
+ {"ClientSessionCache", Type, 3, ""},
+ {"ClientSessionState", Type, 3, ""},
+ {"Config", Type, 0, ""},
+ {"Config.Certificates", Field, 0, ""},
+ {"Config.CipherSuites", Field, 0, ""},
+ {"Config.ClientAuth", Field, 0, ""},
+ {"Config.ClientCAs", Field, 0, ""},
+ {"Config.ClientSessionCache", Field, 3, ""},
+ {"Config.CurvePreferences", Field, 3, ""},
+ {"Config.DynamicRecordSizingDisabled", Field, 7, ""},
+ {"Config.EncryptedClientHelloConfigList", Field, 23, ""},
+ {"Config.EncryptedClientHelloKeys", Field, 24, ""},
+ {"Config.EncryptedClientHelloRejectionVerify", Field, 23, ""},
+ {"Config.GetCertificate", Field, 4, ""},
+ {"Config.GetClientCertificate", Field, 8, ""},
+ {"Config.GetConfigForClient", Field, 8, ""},
+ {"Config.GetEncryptedClientHelloKeys", Field, 25, ""},
+ {"Config.InsecureSkipVerify", Field, 0, ""},
+ {"Config.KeyLogWriter", Field, 8, ""},
+ {"Config.MaxVersion", Field, 2, ""},
+ {"Config.MinVersion", Field, 2, ""},
+ {"Config.NameToCertificate", Field, 0, ""},
+ {"Config.NextProtos", Field, 0, ""},
+ {"Config.PreferServerCipherSuites", Field, 1, ""},
+ {"Config.Rand", Field, 0, ""},
+ {"Config.Renegotiation", Field, 7, ""},
+ {"Config.RootCAs", Field, 0, ""},
+ {"Config.ServerName", Field, 0, ""},
+ {"Config.SessionTicketKey", Field, 1, ""},
+ {"Config.SessionTicketsDisabled", Field, 1, ""},
+ {"Config.Time", Field, 0, ""},
+ {"Config.UnwrapSession", Field, 21, ""},
+ {"Config.VerifyConnection", Field, 15, ""},
+ {"Config.VerifyPeerCertificate", Field, 8, ""},
+ {"Config.WrapSession", Field, 21, ""},
+ {"Conn", Type, 0, ""},
+ {"ConnectionState", Type, 0, ""},
+ {"ConnectionState.CipherSuite", Field, 0, ""},
+ {"ConnectionState.CurveID", Field, 25, ""},
+ {"ConnectionState.DidResume", Field, 1, ""},
+ {"ConnectionState.ECHAccepted", Field, 23, ""},
+ {"ConnectionState.HandshakeComplete", Field, 0, ""},
+ {"ConnectionState.NegotiatedProtocol", Field, 0, ""},
+ {"ConnectionState.NegotiatedProtocolIsMutual", Field, 0, ""},
+ {"ConnectionState.OCSPResponse", Field, 5, ""},
+ {"ConnectionState.PeerCertificates", Field, 0, ""},
+ {"ConnectionState.ServerName", Field, 0, ""},
+ {"ConnectionState.SignedCertificateTimestamps", Field, 5, ""},
+ {"ConnectionState.TLSUnique", Field, 4, ""},
+ {"ConnectionState.VerifiedChains", Field, 0, ""},
+ {"ConnectionState.Version", Field, 3, ""},
+ {"CurveID", Type, 3, ""},
+ {"CurveP256", Const, 3, ""},
+ {"CurveP384", Const, 3, ""},
+ {"CurveP521", Const, 3, ""},
+ {"Dial", Func, 0, "func(network string, addr string, config *Config) (*Conn, error)"},
+ {"DialWithDialer", Func, 3, "func(dialer *net.Dialer, network string, addr string, config *Config) (*Conn, error)"},
+ {"Dialer", Type, 15, ""},
+ {"Dialer.Config", Field, 15, ""},
+ {"Dialer.NetDialer", Field, 15, ""},
+ {"ECDSAWithP256AndSHA256", Const, 8, ""},
+ {"ECDSAWithP384AndSHA384", Const, 8, ""},
+ {"ECDSAWithP521AndSHA512", Const, 8, ""},
+ {"ECDSAWithSHA1", Const, 10, ""},
+ {"ECHRejectionError", Type, 23, ""},
+ {"ECHRejectionError.RetryConfigList", Field, 23, ""},
+ {"Ed25519", Const, 13, ""},
+ {"EncryptedClientHelloKey", Type, 24, ""},
+ {"EncryptedClientHelloKey.Config", Field, 24, ""},
+ {"EncryptedClientHelloKey.PrivateKey", Field, 24, ""},
+ {"EncryptedClientHelloKey.SendAsRetry", Field, 24, ""},
+ {"InsecureCipherSuites", Func, 14, "func() []*CipherSuite"},
+ {"Listen", Func, 0, "func(network string, laddr string, config *Config) (net.Listener, error)"},
+ {"LoadX509KeyPair", Func, 0, "func(certFile string, keyFile string) (Certificate, error)"},
+ {"NewLRUClientSessionCache", Func, 3, "func(capacity int) ClientSessionCache"},
+ {"NewListener", Func, 0, "func(inner net.Listener, config *Config) net.Listener"},
+ {"NewResumptionState", Func, 21, "func(ticket []byte, state *SessionState) (*ClientSessionState, error)"},
+ {"NoClientCert", Const, 0, ""},
+ {"PKCS1WithSHA1", Const, 8, ""},
+ {"PKCS1WithSHA256", Const, 8, ""},
+ {"PKCS1WithSHA384", Const, 8, ""},
+ {"PKCS1WithSHA512", Const, 8, ""},
+ {"PSSWithSHA256", Const, 8, ""},
+ {"PSSWithSHA384", Const, 8, ""},
+ {"PSSWithSHA512", Const, 8, ""},
+ {"ParseSessionState", Func, 21, "func(data []byte) (*SessionState, error)"},
+ {"QUICClient", Func, 21, "func(config *QUICConfig) *QUICConn"},
+ {"QUICConfig", Type, 21, ""},
+ {"QUICConfig.EnableSessionEvents", Field, 23, ""},
+ {"QUICConfig.TLSConfig", Field, 21, ""},
+ {"QUICConn", Type, 21, ""},
+ {"QUICEncryptionLevel", Type, 21, ""},
+ {"QUICEncryptionLevelApplication", Const, 21, ""},
+ {"QUICEncryptionLevelEarly", Const, 21, ""},
+ {"QUICEncryptionLevelHandshake", Const, 21, ""},
+ {"QUICEncryptionLevelInitial", Const, 21, ""},
+ {"QUICEvent", Type, 21, ""},
+ {"QUICEvent.Data", Field, 21, ""},
+ {"QUICEvent.Kind", Field, 21, ""},
+ {"QUICEvent.Level", Field, 21, ""},
+ {"QUICEvent.SessionState", Field, 23, ""},
+ {"QUICEvent.Suite", Field, 21, ""},
+ {"QUICEventKind", Type, 21, ""},
+ {"QUICHandshakeDone", Const, 21, ""},
+ {"QUICNoEvent", Const, 21, ""},
+ {"QUICRejectedEarlyData", Const, 21, ""},
+ {"QUICResumeSession", Const, 23, ""},
+ {"QUICServer", Func, 21, "func(config *QUICConfig) *QUICConn"},
+ {"QUICSessionTicketOptions", Type, 21, ""},
+ {"QUICSessionTicketOptions.EarlyData", Field, 21, ""},
+ {"QUICSessionTicketOptions.Extra", Field, 23, ""},
+ {"QUICSetReadSecret", Const, 21, ""},
+ {"QUICSetWriteSecret", Const, 21, ""},
+ {"QUICStoreSession", Const, 23, ""},
+ {"QUICTransportParameters", Const, 21, ""},
+ {"QUICTransportParametersRequired", Const, 21, ""},
+ {"QUICWriteData", Const, 21, ""},
+ {"RecordHeaderError", Type, 6, ""},
+ {"RecordHeaderError.Conn", Field, 12, ""},
+ {"RecordHeaderError.Msg", Field, 6, ""},
+ {"RecordHeaderError.RecordHeader", Field, 6, ""},
+ {"RenegotiateFreelyAsClient", Const, 7, ""},
+ {"RenegotiateNever", Const, 7, ""},
+ {"RenegotiateOnceAsClient", Const, 7, ""},
+ {"RenegotiationSupport", Type, 7, ""},
+ {"RequestClientCert", Const, 0, ""},
+ {"RequireAndVerifyClientCert", Const, 0, ""},
+ {"RequireAnyClientCert", Const, 0, ""},
+ {"Server", Func, 0, "func(conn net.Conn, config *Config) *Conn"},
+ {"SessionState", Type, 21, ""},
+ {"SessionState.EarlyData", Field, 21, ""},
+ {"SessionState.Extra", Field, 21, ""},
+ {"SignatureScheme", Type, 8, ""},
+ {"TLS_AES_128_GCM_SHA256", Const, 12, ""},
+ {"TLS_AES_256_GCM_SHA384", Const, 12, ""},
+ {"TLS_CHACHA20_POLY1305_SHA256", Const, 12, ""},
+ {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", Const, 2, ""},
+ {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
+ {"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", Const, 2, ""},
+ {"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", Const, 2, ""},
+ {"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", Const, 5, ""},
+ {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", Const, 8, ""},
+ {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14, ""},
+ {"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", Const, 2, ""},
+ {"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0, ""},
+ {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", Const, 0, ""},
+ {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
+ {"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", Const, 2, ""},
+ {"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", Const, 1, ""},
+ {"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", Const, 5, ""},
+ {"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", Const, 8, ""},
+ {"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14, ""},
+ {"TLS_ECDHE_RSA_WITH_RC4_128_SHA", Const, 0, ""},
+ {"TLS_FALLBACK_SCSV", Const, 4, ""},
+ {"TLS_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0, ""},
+ {"TLS_RSA_WITH_AES_128_CBC_SHA", Const, 0, ""},
+ {"TLS_RSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
+ {"TLS_RSA_WITH_AES_128_GCM_SHA256", Const, 6, ""},
+ {"TLS_RSA_WITH_AES_256_CBC_SHA", Const, 1, ""},
+ {"TLS_RSA_WITH_AES_256_GCM_SHA384", Const, 6, ""},
+ {"TLS_RSA_WITH_RC4_128_SHA", Const, 0, ""},
+ {"VerifyClientCertIfGiven", Const, 0, ""},
+ {"VersionName", Func, 21, "func(version uint16) string"},
+ {"VersionSSL30", Const, 2, ""},
+ {"VersionTLS10", Const, 2, ""},
+ {"VersionTLS11", Const, 2, ""},
+ {"VersionTLS12", Const, 2, ""},
+ {"VersionTLS13", Const, 12, ""},
+ {"X25519", Const, 8, ""},
+ {"X25519MLKEM768", Const, 24, ""},
+ {"X509KeyPair", Func, 0, "func(certPEMBlock []byte, keyPEMBlock []byte) (Certificate, error)"},
+ },
+ "crypto/x509": {
+ {"(*CertPool).AddCert", Method, 0, ""},
+ {"(*CertPool).AddCertWithConstraint", Method, 22, ""},
+ {"(*CertPool).AppendCertsFromPEM", Method, 0, ""},
+ {"(*CertPool).Clone", Method, 19, ""},
+ {"(*CertPool).Equal", Method, 19, ""},
+ {"(*CertPool).Subjects", Method, 0, ""},
+ {"(*Certificate).CheckCRLSignature", Method, 0, ""},
+ {"(*Certificate).CheckSignature", Method, 0, ""},
+ {"(*Certificate).CheckSignatureFrom", Method, 0, ""},
+ {"(*Certificate).CreateCRL", Method, 0, ""},
+ {"(*Certificate).Equal", Method, 0, ""},
+ {"(*Certificate).Verify", Method, 0, ""},
+ {"(*Certificate).VerifyHostname", Method, 0, ""},
+ {"(*CertificateRequest).CheckSignature", Method, 5, ""},
+ {"(*OID).UnmarshalBinary", Method, 23, ""},
+ {"(*OID).UnmarshalText", Method, 23, ""},
+ {"(*RevocationList).CheckSignatureFrom", Method, 19, ""},
+ {"(CertificateInvalidError).Error", Method, 0, ""},
+ {"(ConstraintViolationError).Error", Method, 0, ""},
+ {"(HostnameError).Error", Method, 0, ""},
+ {"(InsecureAlgorithmError).Error", Method, 6, ""},
+ {"(OID).AppendBinary", Method, 24, ""},
+ {"(OID).AppendText", Method, 24, ""},
+ {"(OID).Equal", Method, 22, ""},
+ {"(OID).EqualASN1OID", Method, 22, ""},
+ {"(OID).MarshalBinary", Method, 23, ""},
+ {"(OID).MarshalText", Method, 23, ""},
+ {"(OID).String", Method, 22, ""},
+ {"(PublicKeyAlgorithm).String", Method, 10, ""},
+ {"(SignatureAlgorithm).String", Method, 6, ""},
+ {"(SystemRootsError).Error", Method, 1, ""},
+ {"(SystemRootsError).Unwrap", Method, 16, ""},
+ {"(UnhandledCriticalExtension).Error", Method, 0, ""},
+ {"(UnknownAuthorityError).Error", Method, 0, ""},
+ {"CANotAuthorizedForExtKeyUsage", Const, 10, ""},
+ {"CANotAuthorizedForThisName", Const, 0, ""},
+ {"CertPool", Type, 0, ""},
+ {"Certificate", Type, 0, ""},
+ {"Certificate.AuthorityKeyId", Field, 0, ""},
+ {"Certificate.BasicConstraintsValid", Field, 0, ""},
+ {"Certificate.CRLDistributionPoints", Field, 2, ""},
+ {"Certificate.DNSNames", Field, 0, ""},
+ {"Certificate.EmailAddresses", Field, 0, ""},
+ {"Certificate.ExcludedDNSDomains", Field, 9, ""},
+ {"Certificate.ExcludedEmailAddresses", Field, 10, ""},
+ {"Certificate.ExcludedIPRanges", Field, 10, ""},
+ {"Certificate.ExcludedURIDomains", Field, 10, ""},
+ {"Certificate.ExtKeyUsage", Field, 0, ""},
+ {"Certificate.Extensions", Field, 2, ""},
+ {"Certificate.ExtraExtensions", Field, 2, ""},
+ {"Certificate.IPAddresses", Field, 1, ""},
+ {"Certificate.InhibitAnyPolicy", Field, 24, ""},
+ {"Certificate.InhibitAnyPolicyZero", Field, 24, ""},
+ {"Certificate.InhibitPolicyMapping", Field, 24, ""},
+ {"Certificate.InhibitPolicyMappingZero", Field, 24, ""},
+ {"Certificate.IsCA", Field, 0, ""},
+ {"Certificate.Issuer", Field, 0, ""},
+ {"Certificate.IssuingCertificateURL", Field, 2, ""},
+ {"Certificate.KeyUsage", Field, 0, ""},
+ {"Certificate.MaxPathLen", Field, 0, ""},
+ {"Certificate.MaxPathLenZero", Field, 4, ""},
+ {"Certificate.NotAfter", Field, 0, ""},
+ {"Certificate.NotBefore", Field, 0, ""},
+ {"Certificate.OCSPServer", Field, 2, ""},
+ {"Certificate.PermittedDNSDomains", Field, 0, ""},
+ {"Certificate.PermittedDNSDomainsCritical", Field, 0, ""},
+ {"Certificate.PermittedEmailAddresses", Field, 10, ""},
+ {"Certificate.PermittedIPRanges", Field, 10, ""},
+ {"Certificate.PermittedURIDomains", Field, 10, ""},
+ {"Certificate.Policies", Field, 22, ""},
+ {"Certificate.PolicyIdentifiers", Field, 0, ""},
+ {"Certificate.PolicyMappings", Field, 24, ""},
+ {"Certificate.PublicKey", Field, 0, ""},
+ {"Certificate.PublicKeyAlgorithm", Field, 0, ""},
+ {"Certificate.Raw", Field, 0, ""},
+ {"Certificate.RawIssuer", Field, 0, ""},
+ {"Certificate.RawSubject", Field, 0, ""},
+ {"Certificate.RawSubjectPublicKeyInfo", Field, 0, ""},
+ {"Certificate.RawTBSCertificate", Field, 0, ""},
+ {"Certificate.RequireExplicitPolicy", Field, 24, ""},
+ {"Certificate.RequireExplicitPolicyZero", Field, 24, ""},
+ {"Certificate.SerialNumber", Field, 0, ""},
+ {"Certificate.Signature", Field, 0, ""},
+ {"Certificate.SignatureAlgorithm", Field, 0, ""},
+ {"Certificate.Subject", Field, 0, ""},
+ {"Certificate.SubjectKeyId", Field, 0, ""},
+ {"Certificate.URIs", Field, 10, ""},
+ {"Certificate.UnhandledCriticalExtensions", Field, 5, ""},
+ {"Certificate.UnknownExtKeyUsage", Field, 0, ""},
+ {"Certificate.Version", Field, 0, ""},
+ {"CertificateInvalidError", Type, 0, ""},
+ {"CertificateInvalidError.Cert", Field, 0, ""},
+ {"CertificateInvalidError.Detail", Field, 10, ""},
+ {"CertificateInvalidError.Reason", Field, 0, ""},
+ {"CertificateRequest", Type, 3, ""},
+ {"CertificateRequest.Attributes", Field, 3, ""},
+ {"CertificateRequest.DNSNames", Field, 3, ""},
+ {"CertificateRequest.EmailAddresses", Field, 3, ""},
+ {"CertificateRequest.Extensions", Field, 3, ""},
+ {"CertificateRequest.ExtraExtensions", Field, 3, ""},
+ {"CertificateRequest.IPAddresses", Field, 3, ""},
+ {"CertificateRequest.PublicKey", Field, 3, ""},
+ {"CertificateRequest.PublicKeyAlgorithm", Field, 3, ""},
+ {"CertificateRequest.Raw", Field, 3, ""},
+ {"CertificateRequest.RawSubject", Field, 3, ""},
+ {"CertificateRequest.RawSubjectPublicKeyInfo", Field, 3, ""},
+ {"CertificateRequest.RawTBSCertificateRequest", Field, 3, ""},
+ {"CertificateRequest.Signature", Field, 3, ""},
+ {"CertificateRequest.SignatureAlgorithm", Field, 3, ""},
+ {"CertificateRequest.Subject", Field, 3, ""},
+ {"CertificateRequest.URIs", Field, 10, ""},
+ {"CertificateRequest.Version", Field, 3, ""},
+ {"ConstraintViolationError", Type, 0, ""},
+ {"CreateCertificate", Func, 0, "func(rand io.Reader, template *Certificate, parent *Certificate, pub any, priv any) ([]byte, error)"},
+ {"CreateCertificateRequest", Func, 3, "func(rand io.Reader, template *CertificateRequest, priv any) (csr []byte, err error)"},
+ {"CreateRevocationList", Func, 15, "func(rand io.Reader, template *RevocationList, issuer *Certificate, priv crypto.Signer) ([]byte, error)"},
+ {"DSA", Const, 0, ""},
+ {"DSAWithSHA1", Const, 0, ""},
+ {"DSAWithSHA256", Const, 0, ""},
+ {"DecryptPEMBlock", Func, 1, "func(b *pem.Block, password []byte) ([]byte, error)"},
+ {"ECDSA", Const, 1, ""},
+ {"ECDSAWithSHA1", Const, 1, ""},
+ {"ECDSAWithSHA256", Const, 1, ""},
+ {"ECDSAWithSHA384", Const, 1, ""},
+ {"ECDSAWithSHA512", Const, 1, ""},
+ {"Ed25519", Const, 13, ""},
+ {"EncryptPEMBlock", Func, 1, "func(rand io.Reader, blockType string, data []byte, password []byte, alg PEMCipher) (*pem.Block, error)"},
+ {"ErrUnsupportedAlgorithm", Var, 0, ""},
+ {"Expired", Const, 0, ""},
+ {"ExtKeyUsage", Type, 0, ""},
+ {"ExtKeyUsageAny", Const, 0, ""},
+ {"ExtKeyUsageClientAuth", Const, 0, ""},
+ {"ExtKeyUsageCodeSigning", Const, 0, ""},
+ {"ExtKeyUsageEmailProtection", Const, 0, ""},
+ {"ExtKeyUsageIPSECEndSystem", Const, 1, ""},
+ {"ExtKeyUsageIPSECTunnel", Const, 1, ""},
+ {"ExtKeyUsageIPSECUser", Const, 1, ""},
+ {"ExtKeyUsageMicrosoftCommercialCodeSigning", Const, 10, ""},
+ {"ExtKeyUsageMicrosoftKernelCodeSigning", Const, 10, ""},
+ {"ExtKeyUsageMicrosoftServerGatedCrypto", Const, 1, ""},
+ {"ExtKeyUsageNetscapeServerGatedCrypto", Const, 1, ""},
+ {"ExtKeyUsageOCSPSigning", Const, 0, ""},
+ {"ExtKeyUsageServerAuth", Const, 0, ""},
+ {"ExtKeyUsageTimeStamping", Const, 0, ""},
+ {"HostnameError", Type, 0, ""},
+ {"HostnameError.Certificate", Field, 0, ""},
+ {"HostnameError.Host", Field, 0, ""},
+ {"IncompatibleUsage", Const, 1, ""},
+ {"IncorrectPasswordError", Var, 1, ""},
+ {"InsecureAlgorithmError", Type, 6, ""},
+ {"InvalidReason", Type, 0, ""},
+ {"IsEncryptedPEMBlock", Func, 1, "func(b *pem.Block) bool"},
+ {"KeyUsage", Type, 0, ""},
+ {"KeyUsageCRLSign", Const, 0, ""},
+ {"KeyUsageCertSign", Const, 0, ""},
+ {"KeyUsageContentCommitment", Const, 0, ""},
+ {"KeyUsageDataEncipherment", Const, 0, ""},
+ {"KeyUsageDecipherOnly", Const, 0, ""},
+ {"KeyUsageDigitalSignature", Const, 0, ""},
+ {"KeyUsageEncipherOnly", Const, 0, ""},
+ {"KeyUsageKeyAgreement", Const, 0, ""},
+ {"KeyUsageKeyEncipherment", Const, 0, ""},
+ {"MD2WithRSA", Const, 0, ""},
+ {"MD5WithRSA", Const, 0, ""},
+ {"MarshalECPrivateKey", Func, 2, "func(key *ecdsa.PrivateKey) ([]byte, error)"},
+ {"MarshalPKCS1PrivateKey", Func, 0, "func(key *rsa.PrivateKey) []byte"},
+ {"MarshalPKCS1PublicKey", Func, 10, "func(key *rsa.PublicKey) []byte"},
+ {"MarshalPKCS8PrivateKey", Func, 10, "func(key any) ([]byte, error)"},
+ {"MarshalPKIXPublicKey", Func, 0, "func(pub any) ([]byte, error)"},
+ {"NameConstraintsWithoutSANs", Const, 10, ""},
+ {"NameMismatch", Const, 8, ""},
+ {"NewCertPool", Func, 0, "func() *CertPool"},
+ {"NoValidChains", Const, 24, ""},
+ {"NotAuthorizedToSign", Const, 0, ""},
+ {"OID", Type, 22, ""},
+ {"OIDFromInts", Func, 22, "func(oid []uint64) (OID, error)"},
+ {"PEMCipher", Type, 1, ""},
+ {"PEMCipher3DES", Const, 1, ""},
+ {"PEMCipherAES128", Const, 1, ""},
+ {"PEMCipherAES192", Const, 1, ""},
+ {"PEMCipherAES256", Const, 1, ""},
+ {"PEMCipherDES", Const, 1, ""},
+ {"ParseCRL", Func, 0, "func(crlBytes []byte) (*pkix.CertificateList, error)"},
+ {"ParseCertificate", Func, 0, "func(der []byte) (*Certificate, error)"},
+ {"ParseCertificateRequest", Func, 3, "func(asn1Data []byte) (*CertificateRequest, error)"},
+ {"ParseCertificates", Func, 0, "func(der []byte) ([]*Certificate, error)"},
+ {"ParseDERCRL", Func, 0, "func(derBytes []byte) (*pkix.CertificateList, error)"},
+ {"ParseECPrivateKey", Func, 1, "func(der []byte) (*ecdsa.PrivateKey, error)"},
+ {"ParseOID", Func, 23, "func(oid string) (OID, error)"},
+ {"ParsePKCS1PrivateKey", Func, 0, "func(der []byte) (*rsa.PrivateKey, error)"},
+ {"ParsePKCS1PublicKey", Func, 10, "func(der []byte) (*rsa.PublicKey, error)"},
+ {"ParsePKCS8PrivateKey", Func, 0, "func(der []byte) (key any, err error)"},
+ {"ParsePKIXPublicKey", Func, 0, "func(derBytes []byte) (pub any, err error)"},
+ {"ParseRevocationList", Func, 19, "func(der []byte) (*RevocationList, error)"},
+ {"PolicyMapping", Type, 24, ""},
+ {"PolicyMapping.IssuerDomainPolicy", Field, 24, ""},
+ {"PolicyMapping.SubjectDomainPolicy", Field, 24, ""},
+ {"PublicKeyAlgorithm", Type, 0, ""},
+ {"PureEd25519", Const, 13, ""},
+ {"RSA", Const, 0, ""},
+ {"RevocationList", Type, 15, ""},
+ {"RevocationList.AuthorityKeyId", Field, 19, ""},
+ {"RevocationList.Extensions", Field, 19, ""},
+ {"RevocationList.ExtraExtensions", Field, 15, ""},
+ {"RevocationList.Issuer", Field, 19, ""},
+ {"RevocationList.NextUpdate", Field, 15, ""},
+ {"RevocationList.Number", Field, 15, ""},
+ {"RevocationList.Raw", Field, 19, ""},
+ {"RevocationList.RawIssuer", Field, 19, ""},
+ {"RevocationList.RawTBSRevocationList", Field, 19, ""},
+ {"RevocationList.RevokedCertificateEntries", Field, 21, ""},
+ {"RevocationList.RevokedCertificates", Field, 15, ""},
+ {"RevocationList.Signature", Field, 19, ""},
+ {"RevocationList.SignatureAlgorithm", Field, 15, ""},
+ {"RevocationList.ThisUpdate", Field, 15, ""},
+ {"RevocationListEntry", Type, 21, ""},
+ {"RevocationListEntry.Extensions", Field, 21, ""},
+ {"RevocationListEntry.ExtraExtensions", Field, 21, ""},
+ {"RevocationListEntry.Raw", Field, 21, ""},
+ {"RevocationListEntry.ReasonCode", Field, 21, ""},
+ {"RevocationListEntry.RevocationTime", Field, 21, ""},
+ {"RevocationListEntry.SerialNumber", Field, 21, ""},
+ {"SHA1WithRSA", Const, 0, ""},
+ {"SHA256WithRSA", Const, 0, ""},
+ {"SHA256WithRSAPSS", Const, 8, ""},
+ {"SHA384WithRSA", Const, 0, ""},
+ {"SHA384WithRSAPSS", Const, 8, ""},
+ {"SHA512WithRSA", Const, 0, ""},
+ {"SHA512WithRSAPSS", Const, 8, ""},
+ {"SetFallbackRoots", Func, 20, "func(roots *CertPool)"},
+ {"SignatureAlgorithm", Type, 0, ""},
+ {"SystemCertPool", Func, 7, "func() (*CertPool, error)"},
+ {"SystemRootsError", Type, 1, ""},
+ {"SystemRootsError.Err", Field, 7, ""},
+ {"TooManyConstraints", Const, 10, ""},
+ {"TooManyIntermediates", Const, 0, ""},
+ {"UnconstrainedName", Const, 10, ""},
+ {"UnhandledCriticalExtension", Type, 0, ""},
+ {"UnknownAuthorityError", Type, 0, ""},
+ {"UnknownAuthorityError.Cert", Field, 8, ""},
+ {"UnknownPublicKeyAlgorithm", Const, 0, ""},
+ {"UnknownSignatureAlgorithm", Const, 0, ""},
+ {"VerifyOptions", Type, 0, ""},
+ {"VerifyOptions.CertificatePolicies", Field, 24, ""},
+ {"VerifyOptions.CurrentTime", Field, 0, ""},
+ {"VerifyOptions.DNSName", Field, 0, ""},
+ {"VerifyOptions.Intermediates", Field, 0, ""},
+ {"VerifyOptions.KeyUsages", Field, 1, ""},
+ {"VerifyOptions.MaxConstraintComparisions", Field, 10, ""},
+ {"VerifyOptions.Roots", Field, 0, ""},
+ },
+ "crypto/x509/pkix": {
+ {"(*CertificateList).HasExpired", Method, 0, ""},
+ {"(*Name).FillFromRDNSequence", Method, 0, ""},
+ {"(Name).String", Method, 10, ""},
+ {"(Name).ToRDNSequence", Method, 0, ""},
+ {"(RDNSequence).String", Method, 10, ""},
+ {"AlgorithmIdentifier", Type, 0, ""},
+ {"AlgorithmIdentifier.Algorithm", Field, 0, ""},
+ {"AlgorithmIdentifier.Parameters", Field, 0, ""},
+ {"AttributeTypeAndValue", Type, 0, ""},
+ {"AttributeTypeAndValue.Type", Field, 0, ""},
+ {"AttributeTypeAndValue.Value", Field, 0, ""},
+ {"AttributeTypeAndValueSET", Type, 3, ""},
+ {"AttributeTypeAndValueSET.Type", Field, 3, ""},
+ {"AttributeTypeAndValueSET.Value", Field, 3, ""},
+ {"CertificateList", Type, 0, ""},
+ {"CertificateList.SignatureAlgorithm", Field, 0, ""},
+ {"CertificateList.SignatureValue", Field, 0, ""},
+ {"CertificateList.TBSCertList", Field, 0, ""},
+ {"Extension", Type, 0, ""},
+ {"Extension.Critical", Field, 0, ""},
+ {"Extension.Id", Field, 0, ""},
+ {"Extension.Value", Field, 0, ""},
+ {"Name", Type, 0, ""},
+ {"Name.CommonName", Field, 0, ""},
+ {"Name.Country", Field, 0, ""},
+ {"Name.ExtraNames", Field, 5, ""},
+ {"Name.Locality", Field, 0, ""},
+ {"Name.Names", Field, 0, ""},
+ {"Name.Organization", Field, 0, ""},
+ {"Name.OrganizationalUnit", Field, 0, ""},
+ {"Name.PostalCode", Field, 0, ""},
+ {"Name.Province", Field, 0, ""},
+ {"Name.SerialNumber", Field, 0, ""},
+ {"Name.StreetAddress", Field, 0, ""},
+ {"RDNSequence", Type, 0, ""},
+ {"RelativeDistinguishedNameSET", Type, 0, ""},
+ {"RevokedCertificate", Type, 0, ""},
+ {"RevokedCertificate.Extensions", Field, 0, ""},
+ {"RevokedCertificate.RevocationTime", Field, 0, ""},
+ {"RevokedCertificate.SerialNumber", Field, 0, ""},
+ {"TBSCertificateList", Type, 0, ""},
+ {"TBSCertificateList.Extensions", Field, 0, ""},
+ {"TBSCertificateList.Issuer", Field, 0, ""},
+ {"TBSCertificateList.NextUpdate", Field, 0, ""},
+ {"TBSCertificateList.Raw", Field, 0, ""},
+ {"TBSCertificateList.RevokedCertificates", Field, 0, ""},
+ {"TBSCertificateList.Signature", Field, 0, ""},
+ {"TBSCertificateList.ThisUpdate", Field, 0, ""},
+ {"TBSCertificateList.Version", Field, 0, ""},
+ },
+ "database/sql": {
+ {"(*ColumnType).DatabaseTypeName", Method, 8, ""},
+ {"(*ColumnType).DecimalSize", Method, 8, ""},
+ {"(*ColumnType).Length", Method, 8, ""},
+ {"(*ColumnType).Name", Method, 8, ""},
+ {"(*ColumnType).Nullable", Method, 8, ""},
+ {"(*ColumnType).ScanType", Method, 8, ""},
+ {"(*Conn).BeginTx", Method, 9, ""},
+ {"(*Conn).Close", Method, 9, ""},
+ {"(*Conn).ExecContext", Method, 9, ""},
+ {"(*Conn).PingContext", Method, 9, ""},
+ {"(*Conn).PrepareContext", Method, 9, ""},
+ {"(*Conn).QueryContext", Method, 9, ""},
+ {"(*Conn).QueryRowContext", Method, 9, ""},
+ {"(*Conn).Raw", Method, 13, ""},
+ {"(*DB).Begin", Method, 0, ""},
+ {"(*DB).BeginTx", Method, 8, ""},
+ {"(*DB).Close", Method, 0, ""},
+ {"(*DB).Conn", Method, 9, ""},
+ {"(*DB).Driver", Method, 0, ""},
+ {"(*DB).Exec", Method, 0, ""},
+ {"(*DB).ExecContext", Method, 8, ""},
+ {"(*DB).Ping", Method, 1, ""},
+ {"(*DB).PingContext", Method, 8, ""},
+ {"(*DB).Prepare", Method, 0, ""},
+ {"(*DB).PrepareContext", Method, 8, ""},
+ {"(*DB).Query", Method, 0, ""},
+ {"(*DB).QueryContext", Method, 8, ""},
+ {"(*DB).QueryRow", Method, 0, ""},
+ {"(*DB).QueryRowContext", Method, 8, ""},
+ {"(*DB).SetConnMaxIdleTime", Method, 15, ""},
+ {"(*DB).SetConnMaxLifetime", Method, 6, ""},
+ {"(*DB).SetMaxIdleConns", Method, 1, ""},
+ {"(*DB).SetMaxOpenConns", Method, 2, ""},
+ {"(*DB).Stats", Method, 5, ""},
+ {"(*Null).Scan", Method, 22, ""},
+ {"(*NullBool).Scan", Method, 0, ""},
+ {"(*NullByte).Scan", Method, 17, ""},
+ {"(*NullFloat64).Scan", Method, 0, ""},
+ {"(*NullInt16).Scan", Method, 17, ""},
+ {"(*NullInt32).Scan", Method, 13, ""},
+ {"(*NullInt64).Scan", Method, 0, ""},
+ {"(*NullString).Scan", Method, 0, ""},
+ {"(*NullTime).Scan", Method, 13, ""},
+ {"(*Row).Err", Method, 15, ""},
+ {"(*Row).Scan", Method, 0, ""},
+ {"(*Rows).Close", Method, 0, ""},
+ {"(*Rows).ColumnTypes", Method, 8, ""},
+ {"(*Rows).Columns", Method, 0, ""},
+ {"(*Rows).Err", Method, 0, ""},
+ {"(*Rows).Next", Method, 0, ""},
+ {"(*Rows).NextResultSet", Method, 8, ""},
+ {"(*Rows).Scan", Method, 0, ""},
+ {"(*Stmt).Close", Method, 0, ""},
+ {"(*Stmt).Exec", Method, 0, ""},
+ {"(*Stmt).ExecContext", Method, 8, ""},
+ {"(*Stmt).Query", Method, 0, ""},
+ {"(*Stmt).QueryContext", Method, 8, ""},
+ {"(*Stmt).QueryRow", Method, 0, ""},
+ {"(*Stmt).QueryRowContext", Method, 8, ""},
+ {"(*Tx).Commit", Method, 0, ""},
+ {"(*Tx).Exec", Method, 0, ""},
+ {"(*Tx).ExecContext", Method, 8, ""},
+ {"(*Tx).Prepare", Method, 0, ""},
+ {"(*Tx).PrepareContext", Method, 8, ""},
+ {"(*Tx).Query", Method, 0, ""},
+ {"(*Tx).QueryContext", Method, 8, ""},
+ {"(*Tx).QueryRow", Method, 0, ""},
+ {"(*Tx).QueryRowContext", Method, 8, ""},
+ {"(*Tx).Rollback", Method, 0, ""},
+ {"(*Tx).Stmt", Method, 0, ""},
+ {"(*Tx).StmtContext", Method, 8, ""},
+ {"(IsolationLevel).String", Method, 11, ""},
+ {"(Null).Value", Method, 22, ""},
+ {"(NullBool).Value", Method, 0, ""},
+ {"(NullByte).Value", Method, 17, ""},
+ {"(NullFloat64).Value", Method, 0, ""},
+ {"(NullInt16).Value", Method, 17, ""},
+ {"(NullInt32).Value", Method, 13, ""},
+ {"(NullInt64).Value", Method, 0, ""},
+ {"(NullString).Value", Method, 0, ""},
+ {"(NullTime).Value", Method, 13, ""},
+ {"ColumnType", Type, 8, ""},
+ {"Conn", Type, 9, ""},
+ {"DB", Type, 0, ""},
+ {"DBStats", Type, 5, ""},
+ {"DBStats.Idle", Field, 11, ""},
+ {"DBStats.InUse", Field, 11, ""},
+ {"DBStats.MaxIdleClosed", Field, 11, ""},
+ {"DBStats.MaxIdleTimeClosed", Field, 15, ""},
+ {"DBStats.MaxLifetimeClosed", Field, 11, ""},
+ {"DBStats.MaxOpenConnections", Field, 11, ""},
+ {"DBStats.OpenConnections", Field, 5, ""},
+ {"DBStats.WaitCount", Field, 11, ""},
+ {"DBStats.WaitDuration", Field, 11, ""},
+ {"Drivers", Func, 4, "func() []string"},
+ {"ErrConnDone", Var, 9, ""},
+ {"ErrNoRows", Var, 0, ""},
+ {"ErrTxDone", Var, 0, ""},
+ {"IsolationLevel", Type, 8, ""},
+ {"LevelDefault", Const, 8, ""},
+ {"LevelLinearizable", Const, 8, ""},
+ {"LevelReadCommitted", Const, 8, ""},
+ {"LevelReadUncommitted", Const, 8, ""},
+ {"LevelRepeatableRead", Const, 8, ""},
+ {"LevelSerializable", Const, 8, ""},
+ {"LevelSnapshot", Const, 8, ""},
+ {"LevelWriteCommitted", Const, 8, ""},
+ {"Named", Func, 8, "func(name string, value any) NamedArg"},
+ {"NamedArg", Type, 8, ""},
+ {"NamedArg.Name", Field, 8, ""},
+ {"NamedArg.Value", Field, 8, ""},
+ {"Null", Type, 22, ""},
+ {"Null.V", Field, 22, ""},
+ {"Null.Valid", Field, 22, ""},
+ {"NullBool", Type, 0, ""},
+ {"NullBool.Bool", Field, 0, ""},
+ {"NullBool.Valid", Field, 0, ""},
+ {"NullByte", Type, 17, ""},
+ {"NullByte.Byte", Field, 17, ""},
+ {"NullByte.Valid", Field, 17, ""},
+ {"NullFloat64", Type, 0, ""},
+ {"NullFloat64.Float64", Field, 0, ""},
+ {"NullFloat64.Valid", Field, 0, ""},
+ {"NullInt16", Type, 17, ""},
+ {"NullInt16.Int16", Field, 17, ""},
+ {"NullInt16.Valid", Field, 17, ""},
+ {"NullInt32", Type, 13, ""},
+ {"NullInt32.Int32", Field, 13, ""},
+ {"NullInt32.Valid", Field, 13, ""},
+ {"NullInt64", Type, 0, ""},
+ {"NullInt64.Int64", Field, 0, ""},
+ {"NullInt64.Valid", Field, 0, ""},
+ {"NullString", Type, 0, ""},
+ {"NullString.String", Field, 0, ""},
+ {"NullString.Valid", Field, 0, ""},
+ {"NullTime", Type, 13, ""},
+ {"NullTime.Time", Field, 13, ""},
+ {"NullTime.Valid", Field, 13, ""},
+ {"Open", Func, 0, "func(driverName string, dataSourceName string) (*DB, error)"},
+ {"OpenDB", Func, 10, "func(c driver.Connector) *DB"},
+ {"Out", Type, 9, ""},
+ {"Out.Dest", Field, 9, ""},
+ {"Out.In", Field, 9, ""},
+ {"RawBytes", Type, 0, ""},
+ {"Register", Func, 0, "func(name string, driver driver.Driver)"},
+ {"Result", Type, 0, ""},
+ {"Row", Type, 0, ""},
+ {"Rows", Type, 0, ""},
+ {"Scanner", Type, 0, ""},
+ {"Stmt", Type, 0, ""},
+ {"Tx", Type, 0, ""},
+ {"TxOptions", Type, 8, ""},
+ {"TxOptions.Isolation", Field, 8, ""},
+ {"TxOptions.ReadOnly", Field, 8, ""},
+ },
+ "database/sql/driver": {
+ {"(NotNull).ConvertValue", Method, 0, ""},
+ {"(Null).ConvertValue", Method, 0, ""},
+ {"(RowsAffected).LastInsertId", Method, 0, ""},
+ {"(RowsAffected).RowsAffected", Method, 0, ""},
+ {"Bool", Var, 0, ""},
+ {"ColumnConverter", Type, 0, ""},
+ {"Conn", Type, 0, ""},
+ {"ConnBeginTx", Type, 8, ""},
+ {"ConnPrepareContext", Type, 8, ""},
+ {"Connector", Type, 10, ""},
+ {"DefaultParameterConverter", Var, 0, ""},
+ {"Driver", Type, 0, ""},
+ {"DriverContext", Type, 10, ""},
+ {"ErrBadConn", Var, 0, ""},
+ {"ErrRemoveArgument", Var, 9, ""},
+ {"ErrSkip", Var, 0, ""},
+ {"Execer", Type, 0, ""},
+ {"ExecerContext", Type, 8, ""},
+ {"Int32", Var, 0, ""},
+ {"IsScanValue", Func, 0, "func(v any) bool"},
+ {"IsValue", Func, 0, "func(v any) bool"},
+ {"IsolationLevel", Type, 8, ""},
+ {"NamedValue", Type, 8, ""},
+ {"NamedValue.Name", Field, 8, ""},
+ {"NamedValue.Ordinal", Field, 8, ""},
+ {"NamedValue.Value", Field, 8, ""},
+ {"NamedValueChecker", Type, 9, ""},
+ {"NotNull", Type, 0, ""},
+ {"NotNull.Converter", Field, 0, ""},
+ {"Null", Type, 0, ""},
+ {"Null.Converter", Field, 0, ""},
+ {"Pinger", Type, 8, ""},
+ {"Queryer", Type, 1, ""},
+ {"QueryerContext", Type, 8, ""},
+ {"Result", Type, 0, ""},
+ {"ResultNoRows", Var, 0, ""},
+ {"Rows", Type, 0, ""},
+ {"RowsAffected", Type, 0, ""},
+ {"RowsColumnTypeDatabaseTypeName", Type, 8, ""},
+ {"RowsColumnTypeLength", Type, 8, ""},
+ {"RowsColumnTypeNullable", Type, 8, ""},
+ {"RowsColumnTypePrecisionScale", Type, 8, ""},
+ {"RowsColumnTypeScanType", Type, 8, ""},
+ {"RowsNextResultSet", Type, 8, ""},
+ {"SessionResetter", Type, 10, ""},
+ {"Stmt", Type, 0, ""},
+ {"StmtExecContext", Type, 8, ""},
+ {"StmtQueryContext", Type, 8, ""},
+ {"String", Var, 0, ""},
+ {"Tx", Type, 0, ""},
+ {"TxOptions", Type, 8, ""},
+ {"TxOptions.Isolation", Field, 8, ""},
+ {"TxOptions.ReadOnly", Field, 8, ""},
+ {"Validator", Type, 15, ""},
+ {"Value", Type, 0, ""},
+ {"ValueConverter", Type, 0, ""},
+ {"Valuer", Type, 0, ""},
+ },
+ "debug/buildinfo": {
+ {"BuildInfo", Type, 18, ""},
+ {"Read", Func, 18, "func(r io.ReaderAt) (*BuildInfo, error)"},
+ {"ReadFile", Func, 18, "func(name string) (info *BuildInfo, err error)"},
+ },
+ "debug/dwarf": {
+ {"(*AddrType).Basic", Method, 0, ""},
+ {"(*AddrType).Common", Method, 0, ""},
+ {"(*AddrType).Size", Method, 0, ""},
+ {"(*AddrType).String", Method, 0, ""},
+ {"(*ArrayType).Common", Method, 0, ""},
+ {"(*ArrayType).Size", Method, 0, ""},
+ {"(*ArrayType).String", Method, 0, ""},
+ {"(*BasicType).Basic", Method, 0, ""},
+ {"(*BasicType).Common", Method, 0, ""},
+ {"(*BasicType).Size", Method, 0, ""},
+ {"(*BasicType).String", Method, 0, ""},
+ {"(*BoolType).Basic", Method, 0, ""},
+ {"(*BoolType).Common", Method, 0, ""},
+ {"(*BoolType).Size", Method, 0, ""},
+ {"(*BoolType).String", Method, 0, ""},
+ {"(*CharType).Basic", Method, 0, ""},
+ {"(*CharType).Common", Method, 0, ""},
+ {"(*CharType).Size", Method, 0, ""},
+ {"(*CharType).String", Method, 0, ""},
+ {"(*CommonType).Common", Method, 0, ""},
+ {"(*CommonType).Size", Method, 0, ""},
+ {"(*ComplexType).Basic", Method, 0, ""},
+ {"(*ComplexType).Common", Method, 0, ""},
+ {"(*ComplexType).Size", Method, 0, ""},
+ {"(*ComplexType).String", Method, 0, ""},
+ {"(*Data).AddSection", Method, 14, ""},
+ {"(*Data).AddTypes", Method, 3, ""},
+ {"(*Data).LineReader", Method, 5, ""},
+ {"(*Data).Ranges", Method, 7, ""},
+ {"(*Data).Reader", Method, 0, ""},
+ {"(*Data).Type", Method, 0, ""},
+ {"(*DotDotDotType).Common", Method, 0, ""},
+ {"(*DotDotDotType).Size", Method, 0, ""},
+ {"(*DotDotDotType).String", Method, 0, ""},
+ {"(*Entry).AttrField", Method, 5, ""},
+ {"(*Entry).Val", Method, 0, ""},
+ {"(*EnumType).Common", Method, 0, ""},
+ {"(*EnumType).Size", Method, 0, ""},
+ {"(*EnumType).String", Method, 0, ""},
+ {"(*FloatType).Basic", Method, 0, ""},
+ {"(*FloatType).Common", Method, 0, ""},
+ {"(*FloatType).Size", Method, 0, ""},
+ {"(*FloatType).String", Method, 0, ""},
+ {"(*FuncType).Common", Method, 0, ""},
+ {"(*FuncType).Size", Method, 0, ""},
+ {"(*FuncType).String", Method, 0, ""},
+ {"(*IntType).Basic", Method, 0, ""},
+ {"(*IntType).Common", Method, 0, ""},
+ {"(*IntType).Size", Method, 0, ""},
+ {"(*IntType).String", Method, 0, ""},
+ {"(*LineReader).Files", Method, 14, ""},
+ {"(*LineReader).Next", Method, 5, ""},
+ {"(*LineReader).Reset", Method, 5, ""},
+ {"(*LineReader).Seek", Method, 5, ""},
+ {"(*LineReader).SeekPC", Method, 5, ""},
+ {"(*LineReader).Tell", Method, 5, ""},
+ {"(*PtrType).Common", Method, 0, ""},
+ {"(*PtrType).Size", Method, 0, ""},
+ {"(*PtrType).String", Method, 0, ""},
+ {"(*QualType).Common", Method, 0, ""},
+ {"(*QualType).Size", Method, 0, ""},
+ {"(*QualType).String", Method, 0, ""},
+ {"(*Reader).AddressSize", Method, 5, ""},
+ {"(*Reader).ByteOrder", Method, 14, ""},
+ {"(*Reader).Next", Method, 0, ""},
+ {"(*Reader).Seek", Method, 0, ""},
+ {"(*Reader).SeekPC", Method, 7, ""},
+ {"(*Reader).SkipChildren", Method, 0, ""},
+ {"(*StructType).Common", Method, 0, ""},
+ {"(*StructType).Defn", Method, 0, ""},
+ {"(*StructType).Size", Method, 0, ""},
+ {"(*StructType).String", Method, 0, ""},
+ {"(*TypedefType).Common", Method, 0, ""},
+ {"(*TypedefType).Size", Method, 0, ""},
+ {"(*TypedefType).String", Method, 0, ""},
+ {"(*UcharType).Basic", Method, 0, ""},
+ {"(*UcharType).Common", Method, 0, ""},
+ {"(*UcharType).Size", Method, 0, ""},
+ {"(*UcharType).String", Method, 0, ""},
+ {"(*UintType).Basic", Method, 0, ""},
+ {"(*UintType).Common", Method, 0, ""},
+ {"(*UintType).Size", Method, 0, ""},
+ {"(*UintType).String", Method, 0, ""},
+ {"(*UnspecifiedType).Basic", Method, 4, ""},
+ {"(*UnspecifiedType).Common", Method, 4, ""},
+ {"(*UnspecifiedType).Size", Method, 4, ""},
+ {"(*UnspecifiedType).String", Method, 4, ""},
+ {"(*UnsupportedType).Common", Method, 13, ""},
+ {"(*UnsupportedType).Size", Method, 13, ""},
+ {"(*UnsupportedType).String", Method, 13, ""},
+ {"(*VoidType).Common", Method, 0, ""},
+ {"(*VoidType).Size", Method, 0, ""},
+ {"(*VoidType).String", Method, 0, ""},
+ {"(Attr).GoString", Method, 0, ""},
+ {"(Attr).String", Method, 0, ""},
+ {"(Class).GoString", Method, 5, ""},
+ {"(Class).String", Method, 5, ""},
+ {"(DecodeError).Error", Method, 0, ""},
+ {"(Tag).GoString", Method, 0, ""},
+ {"(Tag).String", Method, 0, ""},
+ {"AddrType", Type, 0, ""},
+ {"AddrType.BasicType", Field, 0, ""},
+ {"ArrayType", Type, 0, ""},
+ {"ArrayType.CommonType", Field, 0, ""},
+ {"ArrayType.Count", Field, 0, ""},
+ {"ArrayType.StrideBitSize", Field, 0, ""},
+ {"ArrayType.Type", Field, 0, ""},
+ {"Attr", Type, 0, ""},
+ {"AttrAbstractOrigin", Const, 0, ""},
+ {"AttrAccessibility", Const, 0, ""},
+ {"AttrAddrBase", Const, 14, ""},
+ {"AttrAddrClass", Const, 0, ""},
+ {"AttrAlignment", Const, 14, ""},
+ {"AttrAllocated", Const, 0, ""},
+ {"AttrArtificial", Const, 0, ""},
+ {"AttrAssociated", Const, 0, ""},
+ {"AttrBaseTypes", Const, 0, ""},
+ {"AttrBinaryScale", Const, 14, ""},
+ {"AttrBitOffset", Const, 0, ""},
+ {"AttrBitSize", Const, 0, ""},
+ {"AttrByteSize", Const, 0, ""},
+ {"AttrCallAllCalls", Const, 14, ""},
+ {"AttrCallAllSourceCalls", Const, 14, ""},
+ {"AttrCallAllTailCalls", Const, 14, ""},
+ {"AttrCallColumn", Const, 0, ""},
+ {"AttrCallDataLocation", Const, 14, ""},
+ {"AttrCallDataValue", Const, 14, ""},
+ {"AttrCallFile", Const, 0, ""},
+ {"AttrCallLine", Const, 0, ""},
+ {"AttrCallOrigin", Const, 14, ""},
+ {"AttrCallPC", Const, 14, ""},
+ {"AttrCallParameter", Const, 14, ""},
+ {"AttrCallReturnPC", Const, 14, ""},
+ {"AttrCallTailCall", Const, 14, ""},
+ {"AttrCallTarget", Const, 14, ""},
+ {"AttrCallTargetClobbered", Const, 14, ""},
+ {"AttrCallValue", Const, 14, ""},
+ {"AttrCalling", Const, 0, ""},
+ {"AttrCommonRef", Const, 0, ""},
+ {"AttrCompDir", Const, 0, ""},
+ {"AttrConstExpr", Const, 14, ""},
+ {"AttrConstValue", Const, 0, ""},
+ {"AttrContainingType", Const, 0, ""},
+ {"AttrCount", Const, 0, ""},
+ {"AttrDataBitOffset", Const, 14, ""},
+ {"AttrDataLocation", Const, 0, ""},
+ {"AttrDataMemberLoc", Const, 0, ""},
+ {"AttrDecimalScale", Const, 14, ""},
+ {"AttrDecimalSign", Const, 14, ""},
+ {"AttrDeclColumn", Const, 0, ""},
+ {"AttrDeclFile", Const, 0, ""},
+ {"AttrDeclLine", Const, 0, ""},
+ {"AttrDeclaration", Const, 0, ""},
+ {"AttrDefaultValue", Const, 0, ""},
+ {"AttrDefaulted", Const, 14, ""},
+ {"AttrDeleted", Const, 14, ""},
+ {"AttrDescription", Const, 0, ""},
+ {"AttrDigitCount", Const, 14, ""},
+ {"AttrDiscr", Const, 0, ""},
+ {"AttrDiscrList", Const, 0, ""},
+ {"AttrDiscrValue", Const, 0, ""},
+ {"AttrDwoName", Const, 14, ""},
+ {"AttrElemental", Const, 14, ""},
+ {"AttrEncoding", Const, 0, ""},
+ {"AttrEndianity", Const, 14, ""},
+ {"AttrEntrypc", Const, 0, ""},
+ {"AttrEnumClass", Const, 14, ""},
+ {"AttrExplicit", Const, 14, ""},
+ {"AttrExportSymbols", Const, 14, ""},
+ {"AttrExtension", Const, 0, ""},
+ {"AttrExternal", Const, 0, ""},
+ {"AttrFrameBase", Const, 0, ""},
+ {"AttrFriend", Const, 0, ""},
+ {"AttrHighpc", Const, 0, ""},
+ {"AttrIdentifierCase", Const, 0, ""},
+ {"AttrImport", Const, 0, ""},
+ {"AttrInline", Const, 0, ""},
+ {"AttrIsOptional", Const, 0, ""},
+ {"AttrLanguage", Const, 0, ""},
+ {"AttrLinkageName", Const, 14, ""},
+ {"AttrLocation", Const, 0, ""},
+ {"AttrLoclistsBase", Const, 14, ""},
+ {"AttrLowerBound", Const, 0, ""},
+ {"AttrLowpc", Const, 0, ""},
+ {"AttrMacroInfo", Const, 0, ""},
+ {"AttrMacros", Const, 14, ""},
+ {"AttrMainSubprogram", Const, 14, ""},
+ {"AttrMutable", Const, 14, ""},
+ {"AttrName", Const, 0, ""},
+ {"AttrNamelistItem", Const, 0, ""},
+ {"AttrNoreturn", Const, 14, ""},
+ {"AttrObjectPointer", Const, 14, ""},
+ {"AttrOrdering", Const, 0, ""},
+ {"AttrPictureString", Const, 14, ""},
+ {"AttrPriority", Const, 0, ""},
+ {"AttrProducer", Const, 0, ""},
+ {"AttrPrototyped", Const, 0, ""},
+ {"AttrPure", Const, 14, ""},
+ {"AttrRanges", Const, 0, ""},
+ {"AttrRank", Const, 14, ""},
+ {"AttrRecursive", Const, 14, ""},
+ {"AttrReference", Const, 14, ""},
+ {"AttrReturnAddr", Const, 0, ""},
+ {"AttrRnglistsBase", Const, 14, ""},
+ {"AttrRvalueReference", Const, 14, ""},
+ {"AttrSegment", Const, 0, ""},
+ {"AttrSibling", Const, 0, ""},
+ {"AttrSignature", Const, 14, ""},
+ {"AttrSmall", Const, 14, ""},
+ {"AttrSpecification", Const, 0, ""},
+ {"AttrStartScope", Const, 0, ""},
+ {"AttrStaticLink", Const, 0, ""},
+ {"AttrStmtList", Const, 0, ""},
+ {"AttrStrOffsetsBase", Const, 14, ""},
+ {"AttrStride", Const, 0, ""},
+ {"AttrStrideSize", Const, 0, ""},
+ {"AttrStringLength", Const, 0, ""},
+ {"AttrStringLengthBitSize", Const, 14, ""},
+ {"AttrStringLengthByteSize", Const, 14, ""},
+ {"AttrThreadsScaled", Const, 14, ""},
+ {"AttrTrampoline", Const, 0, ""},
+ {"AttrType", Const, 0, ""},
+ {"AttrUpperBound", Const, 0, ""},
+ {"AttrUseLocation", Const, 0, ""},
+ {"AttrUseUTF8", Const, 0, ""},
+ {"AttrVarParam", Const, 0, ""},
+ {"AttrVirtuality", Const, 0, ""},
+ {"AttrVisibility", Const, 0, ""},
+ {"AttrVtableElemLoc", Const, 0, ""},
+ {"BasicType", Type, 0, ""},
+ {"BasicType.BitOffset", Field, 0, ""},
+ {"BasicType.BitSize", Field, 0, ""},
+ {"BasicType.CommonType", Field, 0, ""},
+ {"BasicType.DataBitOffset", Field, 18, ""},
+ {"BoolType", Type, 0, ""},
+ {"BoolType.BasicType", Field, 0, ""},
+ {"CharType", Type, 0, ""},
+ {"CharType.BasicType", Field, 0, ""},
+ {"Class", Type, 5, ""},
+ {"ClassAddrPtr", Const, 14, ""},
+ {"ClassAddress", Const, 5, ""},
+ {"ClassBlock", Const, 5, ""},
+ {"ClassConstant", Const, 5, ""},
+ {"ClassExprLoc", Const, 5, ""},
+ {"ClassFlag", Const, 5, ""},
+ {"ClassLinePtr", Const, 5, ""},
+ {"ClassLocList", Const, 14, ""},
+ {"ClassLocListPtr", Const, 5, ""},
+ {"ClassMacPtr", Const, 5, ""},
+ {"ClassRangeListPtr", Const, 5, ""},
+ {"ClassReference", Const, 5, ""},
+ {"ClassReferenceAlt", Const, 5, ""},
+ {"ClassReferenceSig", Const, 5, ""},
+ {"ClassRngList", Const, 14, ""},
+ {"ClassRngListsPtr", Const, 14, ""},
+ {"ClassStrOffsetsPtr", Const, 14, ""},
+ {"ClassString", Const, 5, ""},
+ {"ClassStringAlt", Const, 5, ""},
+ {"ClassUnknown", Const, 6, ""},
+ {"CommonType", Type, 0, ""},
+ {"CommonType.ByteSize", Field, 0, ""},
+ {"CommonType.Name", Field, 0, ""},
+ {"ComplexType", Type, 0, ""},
+ {"ComplexType.BasicType", Field, 0, ""},
+ {"Data", Type, 0, ""},
+ {"DecodeError", Type, 0, ""},
+ {"DecodeError.Err", Field, 0, ""},
+ {"DecodeError.Name", Field, 0, ""},
+ {"DecodeError.Offset", Field, 0, ""},
+ {"DotDotDotType", Type, 0, ""},
+ {"DotDotDotType.CommonType", Field, 0, ""},
+ {"Entry", Type, 0, ""},
+ {"Entry.Children", Field, 0, ""},
+ {"Entry.Field", Field, 0, ""},
+ {"Entry.Offset", Field, 0, ""},
+ {"Entry.Tag", Field, 0, ""},
+ {"EnumType", Type, 0, ""},
+ {"EnumType.CommonType", Field, 0, ""},
+ {"EnumType.EnumName", Field, 0, ""},
+ {"EnumType.Val", Field, 0, ""},
+ {"EnumValue", Type, 0, ""},
+ {"EnumValue.Name", Field, 0, ""},
+ {"EnumValue.Val", Field, 0, ""},
+ {"ErrUnknownPC", Var, 5, ""},
+ {"Field", Type, 0, ""},
+ {"Field.Attr", Field, 0, ""},
+ {"Field.Class", Field, 5, ""},
+ {"Field.Val", Field, 0, ""},
+ {"FloatType", Type, 0, ""},
+ {"FloatType.BasicType", Field, 0, ""},
+ {"FuncType", Type, 0, ""},
+ {"FuncType.CommonType", Field, 0, ""},
+ {"FuncType.ParamType", Field, 0, ""},
+ {"FuncType.ReturnType", Field, 0, ""},
+ {"IntType", Type, 0, ""},
+ {"IntType.BasicType", Field, 0, ""},
+ {"LineEntry", Type, 5, ""},
+ {"LineEntry.Address", Field, 5, ""},
+ {"LineEntry.BasicBlock", Field, 5, ""},
+ {"LineEntry.Column", Field, 5, ""},
+ {"LineEntry.Discriminator", Field, 5, ""},
+ {"LineEntry.EndSequence", Field, 5, ""},
+ {"LineEntry.EpilogueBegin", Field, 5, ""},
+ {"LineEntry.File", Field, 5, ""},
+ {"LineEntry.ISA", Field, 5, ""},
+ {"LineEntry.IsStmt", Field, 5, ""},
+ {"LineEntry.Line", Field, 5, ""},
+ {"LineEntry.OpIndex", Field, 5, ""},
+ {"LineEntry.PrologueEnd", Field, 5, ""},
+ {"LineFile", Type, 5, ""},
+ {"LineFile.Length", Field, 5, ""},
+ {"LineFile.Mtime", Field, 5, ""},
+ {"LineFile.Name", Field, 5, ""},
+ {"LineReader", Type, 5, ""},
+ {"LineReaderPos", Type, 5, ""},
+ {"New", Func, 0, "func(abbrev []byte, aranges []byte, frame []byte, info []byte, line []byte, pubnames []byte, ranges []byte, str []byte) (*Data, error)"},
+ {"Offset", Type, 0, ""},
+ {"PtrType", Type, 0, ""},
+ {"PtrType.CommonType", Field, 0, ""},
+ {"PtrType.Type", Field, 0, ""},
+ {"QualType", Type, 0, ""},
+ {"QualType.CommonType", Field, 0, ""},
+ {"QualType.Qual", Field, 0, ""},
+ {"QualType.Type", Field, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"StructField", Type, 0, ""},
+ {"StructField.BitOffset", Field, 0, ""},
+ {"StructField.BitSize", Field, 0, ""},
+ {"StructField.ByteOffset", Field, 0, ""},
+ {"StructField.ByteSize", Field, 0, ""},
+ {"StructField.DataBitOffset", Field, 18, ""},
+ {"StructField.Name", Field, 0, ""},
+ {"StructField.Type", Field, 0, ""},
+ {"StructType", Type, 0, ""},
+ {"StructType.CommonType", Field, 0, ""},
+ {"StructType.Field", Field, 0, ""},
+ {"StructType.Incomplete", Field, 0, ""},
+ {"StructType.Kind", Field, 0, ""},
+ {"StructType.StructName", Field, 0, ""},
+ {"Tag", Type, 0, ""},
+ {"TagAccessDeclaration", Const, 0, ""},
+ {"TagArrayType", Const, 0, ""},
+ {"TagAtomicType", Const, 14, ""},
+ {"TagBaseType", Const, 0, ""},
+ {"TagCallSite", Const, 14, ""},
+ {"TagCallSiteParameter", Const, 14, ""},
+ {"TagCatchDwarfBlock", Const, 0, ""},
+ {"TagClassType", Const, 0, ""},
+ {"TagCoarrayType", Const, 14, ""},
+ {"TagCommonDwarfBlock", Const, 0, ""},
+ {"TagCommonInclusion", Const, 0, ""},
+ {"TagCompileUnit", Const, 0, ""},
+ {"TagCondition", Const, 3, ""},
+ {"TagConstType", Const, 0, ""},
+ {"TagConstant", Const, 0, ""},
+ {"TagDwarfProcedure", Const, 0, ""},
+ {"TagDynamicType", Const, 14, ""},
+ {"TagEntryPoint", Const, 0, ""},
+ {"TagEnumerationType", Const, 0, ""},
+ {"TagEnumerator", Const, 0, ""},
+ {"TagFileType", Const, 0, ""},
+ {"TagFormalParameter", Const, 0, ""},
+ {"TagFriend", Const, 0, ""},
+ {"TagGenericSubrange", Const, 14, ""},
+ {"TagImmutableType", Const, 14, ""},
+ {"TagImportedDeclaration", Const, 0, ""},
+ {"TagImportedModule", Const, 0, ""},
+ {"TagImportedUnit", Const, 0, ""},
+ {"TagInheritance", Const, 0, ""},
+ {"TagInlinedSubroutine", Const, 0, ""},
+ {"TagInterfaceType", Const, 0, ""},
+ {"TagLabel", Const, 0, ""},
+ {"TagLexDwarfBlock", Const, 0, ""},
+ {"TagMember", Const, 0, ""},
+ {"TagModule", Const, 0, ""},
+ {"TagMutableType", Const, 0, ""},
+ {"TagNamelist", Const, 0, ""},
+ {"TagNamelistItem", Const, 0, ""},
+ {"TagNamespace", Const, 0, ""},
+ {"TagPackedType", Const, 0, ""},
+ {"TagPartialUnit", Const, 0, ""},
+ {"TagPointerType", Const, 0, ""},
+ {"TagPtrToMemberType", Const, 0, ""},
+ {"TagReferenceType", Const, 0, ""},
+ {"TagRestrictType", Const, 0, ""},
+ {"TagRvalueReferenceType", Const, 3, ""},
+ {"TagSetType", Const, 0, ""},
+ {"TagSharedType", Const, 3, ""},
+ {"TagSkeletonUnit", Const, 14, ""},
+ {"TagStringType", Const, 0, ""},
+ {"TagStructType", Const, 0, ""},
+ {"TagSubprogram", Const, 0, ""},
+ {"TagSubrangeType", Const, 0, ""},
+ {"TagSubroutineType", Const, 0, ""},
+ {"TagTemplateAlias", Const, 3, ""},
+ {"TagTemplateTypeParameter", Const, 0, ""},
+ {"TagTemplateValueParameter", Const, 0, ""},
+ {"TagThrownType", Const, 0, ""},
+ {"TagTryDwarfBlock", Const, 0, ""},
+ {"TagTypeUnit", Const, 3, ""},
+ {"TagTypedef", Const, 0, ""},
+ {"TagUnionType", Const, 0, ""},
+ {"TagUnspecifiedParameters", Const, 0, ""},
+ {"TagUnspecifiedType", Const, 0, ""},
+ {"TagVariable", Const, 0, ""},
+ {"TagVariant", Const, 0, ""},
+ {"TagVariantPart", Const, 0, ""},
+ {"TagVolatileType", Const, 0, ""},
+ {"TagWithStmt", Const, 0, ""},
+ {"Type", Type, 0, ""},
+ {"TypedefType", Type, 0, ""},
+ {"TypedefType.CommonType", Field, 0, ""},
+ {"TypedefType.Type", Field, 0, ""},
+ {"UcharType", Type, 0, ""},
+ {"UcharType.BasicType", Field, 0, ""},
+ {"UintType", Type, 0, ""},
+ {"UintType.BasicType", Field, 0, ""},
+ {"UnspecifiedType", Type, 4, ""},
+ {"UnspecifiedType.BasicType", Field, 4, ""},
+ {"UnsupportedType", Type, 13, ""},
+ {"UnsupportedType.CommonType", Field, 13, ""},
+ {"UnsupportedType.Tag", Field, 13, ""},
+ {"VoidType", Type, 0, ""},
+ {"VoidType.CommonType", Field, 0, ""},
+ },
+ "debug/elf": {
+ {"(*File).Close", Method, 0, ""},
+ {"(*File).DWARF", Method, 0, ""},
+ {"(*File).DynString", Method, 1, ""},
+ {"(*File).DynValue", Method, 21, ""},
+ {"(*File).DynamicSymbols", Method, 4, ""},
+ {"(*File).DynamicVersionNeeds", Method, 24, ""},
+ {"(*File).DynamicVersions", Method, 24, ""},
+ {"(*File).ImportedLibraries", Method, 0, ""},
+ {"(*File).ImportedSymbols", Method, 0, ""},
+ {"(*File).Section", Method, 0, ""},
+ {"(*File).SectionByType", Method, 0, ""},
+ {"(*File).Symbols", Method, 0, ""},
+ {"(*FormatError).Error", Method, 0, ""},
+ {"(*Prog).Open", Method, 0, ""},
+ {"(*Section).Data", Method, 0, ""},
+ {"(*Section).Open", Method, 0, ""},
+ {"(Class).GoString", Method, 0, ""},
+ {"(Class).String", Method, 0, ""},
+ {"(CompressionType).GoString", Method, 6, ""},
+ {"(CompressionType).String", Method, 6, ""},
+ {"(Data).GoString", Method, 0, ""},
+ {"(Data).String", Method, 0, ""},
+ {"(DynFlag).GoString", Method, 0, ""},
+ {"(DynFlag).String", Method, 0, ""},
+ {"(DynFlag1).GoString", Method, 21, ""},
+ {"(DynFlag1).String", Method, 21, ""},
+ {"(DynTag).GoString", Method, 0, ""},
+ {"(DynTag).String", Method, 0, ""},
+ {"(Machine).GoString", Method, 0, ""},
+ {"(Machine).String", Method, 0, ""},
+ {"(NType).GoString", Method, 0, ""},
+ {"(NType).String", Method, 0, ""},
+ {"(OSABI).GoString", Method, 0, ""},
+ {"(OSABI).String", Method, 0, ""},
+ {"(Prog).ReadAt", Method, 0, ""},
+ {"(ProgFlag).GoString", Method, 0, ""},
+ {"(ProgFlag).String", Method, 0, ""},
+ {"(ProgType).GoString", Method, 0, ""},
+ {"(ProgType).String", Method, 0, ""},
+ {"(R_386).GoString", Method, 0, ""},
+ {"(R_386).String", Method, 0, ""},
+ {"(R_390).GoString", Method, 7, ""},
+ {"(R_390).String", Method, 7, ""},
+ {"(R_AARCH64).GoString", Method, 4, ""},
+ {"(R_AARCH64).String", Method, 4, ""},
+ {"(R_ALPHA).GoString", Method, 0, ""},
+ {"(R_ALPHA).String", Method, 0, ""},
+ {"(R_ARM).GoString", Method, 0, ""},
+ {"(R_ARM).String", Method, 0, ""},
+ {"(R_LARCH).GoString", Method, 19, ""},
+ {"(R_LARCH).String", Method, 19, ""},
+ {"(R_MIPS).GoString", Method, 6, ""},
+ {"(R_MIPS).String", Method, 6, ""},
+ {"(R_PPC).GoString", Method, 0, ""},
+ {"(R_PPC).String", Method, 0, ""},
+ {"(R_PPC64).GoString", Method, 5, ""},
+ {"(R_PPC64).String", Method, 5, ""},
+ {"(R_RISCV).GoString", Method, 11, ""},
+ {"(R_RISCV).String", Method, 11, ""},
+ {"(R_SPARC).GoString", Method, 0, ""},
+ {"(R_SPARC).String", Method, 0, ""},
+ {"(R_X86_64).GoString", Method, 0, ""},
+ {"(R_X86_64).String", Method, 0, ""},
+ {"(Section).ReadAt", Method, 0, ""},
+ {"(SectionFlag).GoString", Method, 0, ""},
+ {"(SectionFlag).String", Method, 0, ""},
+ {"(SectionIndex).GoString", Method, 0, ""},
+ {"(SectionIndex).String", Method, 0, ""},
+ {"(SectionType).GoString", Method, 0, ""},
+ {"(SectionType).String", Method, 0, ""},
+ {"(SymBind).GoString", Method, 0, ""},
+ {"(SymBind).String", Method, 0, ""},
+ {"(SymType).GoString", Method, 0, ""},
+ {"(SymType).String", Method, 0, ""},
+ {"(SymVis).GoString", Method, 0, ""},
+ {"(SymVis).String", Method, 0, ""},
+ {"(Type).GoString", Method, 0, ""},
+ {"(Type).String", Method, 0, ""},
+ {"(Version).GoString", Method, 0, ""},
+ {"(Version).String", Method, 0, ""},
+ {"(VersionIndex).Index", Method, 24, ""},
+ {"(VersionIndex).IsHidden", Method, 24, ""},
+ {"ARM_MAGIC_TRAMP_NUMBER", Const, 0, ""},
+ {"COMPRESS_HIOS", Const, 6, ""},
+ {"COMPRESS_HIPROC", Const, 6, ""},
+ {"COMPRESS_LOOS", Const, 6, ""},
+ {"COMPRESS_LOPROC", Const, 6, ""},
+ {"COMPRESS_ZLIB", Const, 6, ""},
+ {"COMPRESS_ZSTD", Const, 21, ""},
+ {"Chdr32", Type, 6, ""},
+ {"Chdr32.Addralign", Field, 6, ""},
+ {"Chdr32.Size", Field, 6, ""},
+ {"Chdr32.Type", Field, 6, ""},
+ {"Chdr64", Type, 6, ""},
+ {"Chdr64.Addralign", Field, 6, ""},
+ {"Chdr64.Size", Field, 6, ""},
+ {"Chdr64.Type", Field, 6, ""},
+ {"Class", Type, 0, ""},
+ {"CompressionType", Type, 6, ""},
+ {"DF_1_CONFALT", Const, 21, ""},
+ {"DF_1_DIRECT", Const, 21, ""},
+ {"DF_1_DISPRELDNE", Const, 21, ""},
+ {"DF_1_DISPRELPND", Const, 21, ""},
+ {"DF_1_EDITED", Const, 21, ""},
+ {"DF_1_ENDFILTEE", Const, 21, ""},
+ {"DF_1_GLOBAL", Const, 21, ""},
+ {"DF_1_GLOBAUDIT", Const, 21, ""},
+ {"DF_1_GROUP", Const, 21, ""},
+ {"DF_1_IGNMULDEF", Const, 21, ""},
+ {"DF_1_INITFIRST", Const, 21, ""},
+ {"DF_1_INTERPOSE", Const, 21, ""},
+ {"DF_1_KMOD", Const, 21, ""},
+ {"DF_1_LOADFLTR", Const, 21, ""},
+ {"DF_1_NOCOMMON", Const, 21, ""},
+ {"DF_1_NODEFLIB", Const, 21, ""},
+ {"DF_1_NODELETE", Const, 21, ""},
+ {"DF_1_NODIRECT", Const, 21, ""},
+ {"DF_1_NODUMP", Const, 21, ""},
+ {"DF_1_NOHDR", Const, 21, ""},
+ {"DF_1_NOKSYMS", Const, 21, ""},
+ {"DF_1_NOOPEN", Const, 21, ""},
+ {"DF_1_NORELOC", Const, 21, ""},
+ {"DF_1_NOW", Const, 21, ""},
+ {"DF_1_ORIGIN", Const, 21, ""},
+ {"DF_1_PIE", Const, 21, ""},
+ {"DF_1_SINGLETON", Const, 21, ""},
+ {"DF_1_STUB", Const, 21, ""},
+ {"DF_1_SYMINTPOSE", Const, 21, ""},
+ {"DF_1_TRANS", Const, 21, ""},
+ {"DF_1_WEAKFILTER", Const, 21, ""},
+ {"DF_BIND_NOW", Const, 0, ""},
+ {"DF_ORIGIN", Const, 0, ""},
+ {"DF_STATIC_TLS", Const, 0, ""},
+ {"DF_SYMBOLIC", Const, 0, ""},
+ {"DF_TEXTREL", Const, 0, ""},
+ {"DT_ADDRRNGHI", Const, 16, ""},
+ {"DT_ADDRRNGLO", Const, 16, ""},
+ {"DT_AUDIT", Const, 16, ""},
+ {"DT_AUXILIARY", Const, 16, ""},
+ {"DT_BIND_NOW", Const, 0, ""},
+ {"DT_CHECKSUM", Const, 16, ""},
+ {"DT_CONFIG", Const, 16, ""},
+ {"DT_DEBUG", Const, 0, ""},
+ {"DT_DEPAUDIT", Const, 16, ""},
+ {"DT_ENCODING", Const, 0, ""},
+ {"DT_FEATURE", Const, 16, ""},
+ {"DT_FILTER", Const, 16, ""},
+ {"DT_FINI", Const, 0, ""},
+ {"DT_FINI_ARRAY", Const, 0, ""},
+ {"DT_FINI_ARRAYSZ", Const, 0, ""},
+ {"DT_FLAGS", Const, 0, ""},
+ {"DT_FLAGS_1", Const, 16, ""},
+ {"DT_GNU_CONFLICT", Const, 16, ""},
+ {"DT_GNU_CONFLICTSZ", Const, 16, ""},
+ {"DT_GNU_HASH", Const, 16, ""},
+ {"DT_GNU_LIBLIST", Const, 16, ""},
+ {"DT_GNU_LIBLISTSZ", Const, 16, ""},
+ {"DT_GNU_PRELINKED", Const, 16, ""},
+ {"DT_HASH", Const, 0, ""},
+ {"DT_HIOS", Const, 0, ""},
+ {"DT_HIPROC", Const, 0, ""},
+ {"DT_INIT", Const, 0, ""},
+ {"DT_INIT_ARRAY", Const, 0, ""},
+ {"DT_INIT_ARRAYSZ", Const, 0, ""},
+ {"DT_JMPREL", Const, 0, ""},
+ {"DT_LOOS", Const, 0, ""},
+ {"DT_LOPROC", Const, 0, ""},
+ {"DT_MIPS_AUX_DYNAMIC", Const, 16, ""},
+ {"DT_MIPS_BASE_ADDRESS", Const, 16, ""},
+ {"DT_MIPS_COMPACT_SIZE", Const, 16, ""},
+ {"DT_MIPS_CONFLICT", Const, 16, ""},
+ {"DT_MIPS_CONFLICTNO", Const, 16, ""},
+ {"DT_MIPS_CXX_FLAGS", Const, 16, ""},
+ {"DT_MIPS_DELTA_CLASS", Const, 16, ""},
+ {"DT_MIPS_DELTA_CLASSSYM", Const, 16, ""},
+ {"DT_MIPS_DELTA_CLASSSYM_NO", Const, 16, ""},
+ {"DT_MIPS_DELTA_CLASS_NO", Const, 16, ""},
+ {"DT_MIPS_DELTA_INSTANCE", Const, 16, ""},
+ {"DT_MIPS_DELTA_INSTANCE_NO", Const, 16, ""},
+ {"DT_MIPS_DELTA_RELOC", Const, 16, ""},
+ {"DT_MIPS_DELTA_RELOC_NO", Const, 16, ""},
+ {"DT_MIPS_DELTA_SYM", Const, 16, ""},
+ {"DT_MIPS_DELTA_SYM_NO", Const, 16, ""},
+ {"DT_MIPS_DYNSTR_ALIGN", Const, 16, ""},
+ {"DT_MIPS_FLAGS", Const, 16, ""},
+ {"DT_MIPS_GOTSYM", Const, 16, ""},
+ {"DT_MIPS_GP_VALUE", Const, 16, ""},
+ {"DT_MIPS_HIDDEN_GOTIDX", Const, 16, ""},
+ {"DT_MIPS_HIPAGENO", Const, 16, ""},
+ {"DT_MIPS_ICHECKSUM", Const, 16, ""},
+ {"DT_MIPS_INTERFACE", Const, 16, ""},
+ {"DT_MIPS_INTERFACE_SIZE", Const, 16, ""},
+ {"DT_MIPS_IVERSION", Const, 16, ""},
+ {"DT_MIPS_LIBLIST", Const, 16, ""},
+ {"DT_MIPS_LIBLISTNO", Const, 16, ""},
+ {"DT_MIPS_LOCALPAGE_GOTIDX", Const, 16, ""},
+ {"DT_MIPS_LOCAL_GOTIDX", Const, 16, ""},
+ {"DT_MIPS_LOCAL_GOTNO", Const, 16, ""},
+ {"DT_MIPS_MSYM", Const, 16, ""},
+ {"DT_MIPS_OPTIONS", Const, 16, ""},
+ {"DT_MIPS_PERF_SUFFIX", Const, 16, ""},
+ {"DT_MIPS_PIXIE_INIT", Const, 16, ""},
+ {"DT_MIPS_PLTGOT", Const, 16, ""},
+ {"DT_MIPS_PROTECTED_GOTIDX", Const, 16, ""},
+ {"DT_MIPS_RLD_MAP", Const, 16, ""},
+ {"DT_MIPS_RLD_MAP_REL", Const, 16, ""},
+ {"DT_MIPS_RLD_TEXT_RESOLVE_ADDR", Const, 16, ""},
+ {"DT_MIPS_RLD_VERSION", Const, 16, ""},
+ {"DT_MIPS_RWPLT", Const, 16, ""},
+ {"DT_MIPS_SYMBOL_LIB", Const, 16, ""},
+ {"DT_MIPS_SYMTABNO", Const, 16, ""},
+ {"DT_MIPS_TIME_STAMP", Const, 16, ""},
+ {"DT_MIPS_UNREFEXTNO", Const, 16, ""},
+ {"DT_MOVEENT", Const, 16, ""},
+ {"DT_MOVESZ", Const, 16, ""},
+ {"DT_MOVETAB", Const, 16, ""},
+ {"DT_NEEDED", Const, 0, ""},
+ {"DT_NULL", Const, 0, ""},
+ {"DT_PLTGOT", Const, 0, ""},
+ {"DT_PLTPAD", Const, 16, ""},
+ {"DT_PLTPADSZ", Const, 16, ""},
+ {"DT_PLTREL", Const, 0, ""},
+ {"DT_PLTRELSZ", Const, 0, ""},
+ {"DT_POSFLAG_1", Const, 16, ""},
+ {"DT_PPC64_GLINK", Const, 16, ""},
+ {"DT_PPC64_OPD", Const, 16, ""},
+ {"DT_PPC64_OPDSZ", Const, 16, ""},
+ {"DT_PPC64_OPT", Const, 16, ""},
+ {"DT_PPC_GOT", Const, 16, ""},
+ {"DT_PPC_OPT", Const, 16, ""},
+ {"DT_PREINIT_ARRAY", Const, 0, ""},
+ {"DT_PREINIT_ARRAYSZ", Const, 0, ""},
+ {"DT_REL", Const, 0, ""},
+ {"DT_RELA", Const, 0, ""},
+ {"DT_RELACOUNT", Const, 16, ""},
+ {"DT_RELAENT", Const, 0, ""},
+ {"DT_RELASZ", Const, 0, ""},
+ {"DT_RELCOUNT", Const, 16, ""},
+ {"DT_RELENT", Const, 0, ""},
+ {"DT_RELSZ", Const, 0, ""},
+ {"DT_RPATH", Const, 0, ""},
+ {"DT_RUNPATH", Const, 0, ""},
+ {"DT_SONAME", Const, 0, ""},
+ {"DT_SPARC_REGISTER", Const, 16, ""},
+ {"DT_STRSZ", Const, 0, ""},
+ {"DT_STRTAB", Const, 0, ""},
+ {"DT_SYMBOLIC", Const, 0, ""},
+ {"DT_SYMENT", Const, 0, ""},
+ {"DT_SYMINENT", Const, 16, ""},
+ {"DT_SYMINFO", Const, 16, ""},
+ {"DT_SYMINSZ", Const, 16, ""},
+ {"DT_SYMTAB", Const, 0, ""},
+ {"DT_SYMTAB_SHNDX", Const, 16, ""},
+ {"DT_TEXTREL", Const, 0, ""},
+ {"DT_TLSDESC_GOT", Const, 16, ""},
+ {"DT_TLSDESC_PLT", Const, 16, ""},
+ {"DT_USED", Const, 16, ""},
+ {"DT_VALRNGHI", Const, 16, ""},
+ {"DT_VALRNGLO", Const, 16, ""},
+ {"DT_VERDEF", Const, 16, ""},
+ {"DT_VERDEFNUM", Const, 16, ""},
+ {"DT_VERNEED", Const, 0, ""},
+ {"DT_VERNEEDNUM", Const, 0, ""},
+ {"DT_VERSYM", Const, 0, ""},
+ {"Data", Type, 0, ""},
+ {"Dyn32", Type, 0, ""},
+ {"Dyn32.Tag", Field, 0, ""},
+ {"Dyn32.Val", Field, 0, ""},
+ {"Dyn64", Type, 0, ""},
+ {"Dyn64.Tag", Field, 0, ""},
+ {"Dyn64.Val", Field, 0, ""},
+ {"DynFlag", Type, 0, ""},
+ {"DynFlag1", Type, 21, ""},
+ {"DynTag", Type, 0, ""},
+ {"DynamicVersion", Type, 24, ""},
+ {"DynamicVersion.Deps", Field, 24, ""},
+ {"DynamicVersion.Flags", Field, 24, ""},
+ {"DynamicVersion.Index", Field, 24, ""},
+ {"DynamicVersion.Name", Field, 24, ""},
+ {"DynamicVersionDep", Type, 24, ""},
+ {"DynamicVersionDep.Dep", Field, 24, ""},
+ {"DynamicVersionDep.Flags", Field, 24, ""},
+ {"DynamicVersionDep.Index", Field, 24, ""},
+ {"DynamicVersionFlag", Type, 24, ""},
+ {"DynamicVersionNeed", Type, 24, ""},
+ {"DynamicVersionNeed.Name", Field, 24, ""},
+ {"DynamicVersionNeed.Needs", Field, 24, ""},
+ {"EI_ABIVERSION", Const, 0, ""},
+ {"EI_CLASS", Const, 0, ""},
+ {"EI_DATA", Const, 0, ""},
+ {"EI_NIDENT", Const, 0, ""},
+ {"EI_OSABI", Const, 0, ""},
+ {"EI_PAD", Const, 0, ""},
+ {"EI_VERSION", Const, 0, ""},
+ {"ELFCLASS32", Const, 0, ""},
+ {"ELFCLASS64", Const, 0, ""},
+ {"ELFCLASSNONE", Const, 0, ""},
+ {"ELFDATA2LSB", Const, 0, ""},
+ {"ELFDATA2MSB", Const, 0, ""},
+ {"ELFDATANONE", Const, 0, ""},
+ {"ELFMAG", Const, 0, ""},
+ {"ELFOSABI_86OPEN", Const, 0, ""},
+ {"ELFOSABI_AIX", Const, 0, ""},
+ {"ELFOSABI_ARM", Const, 0, ""},
+ {"ELFOSABI_AROS", Const, 11, ""},
+ {"ELFOSABI_CLOUDABI", Const, 11, ""},
+ {"ELFOSABI_FENIXOS", Const, 11, ""},
+ {"ELFOSABI_FREEBSD", Const, 0, ""},
+ {"ELFOSABI_HPUX", Const, 0, ""},
+ {"ELFOSABI_HURD", Const, 0, ""},
+ {"ELFOSABI_IRIX", Const, 0, ""},
+ {"ELFOSABI_LINUX", Const, 0, ""},
+ {"ELFOSABI_MODESTO", Const, 0, ""},
+ {"ELFOSABI_NETBSD", Const, 0, ""},
+ {"ELFOSABI_NONE", Const, 0, ""},
+ {"ELFOSABI_NSK", Const, 0, ""},
+ {"ELFOSABI_OPENBSD", Const, 0, ""},
+ {"ELFOSABI_OPENVMS", Const, 0, ""},
+ {"ELFOSABI_SOLARIS", Const, 0, ""},
+ {"ELFOSABI_STANDALONE", Const, 0, ""},
+ {"ELFOSABI_TRU64", Const, 0, ""},
+ {"EM_386", Const, 0, ""},
+ {"EM_486", Const, 0, ""},
+ {"EM_56800EX", Const, 11, ""},
+ {"EM_68HC05", Const, 11, ""},
+ {"EM_68HC08", Const, 11, ""},
+ {"EM_68HC11", Const, 11, ""},
+ {"EM_68HC12", Const, 0, ""},
+ {"EM_68HC16", Const, 11, ""},
+ {"EM_68K", Const, 0, ""},
+ {"EM_78KOR", Const, 11, ""},
+ {"EM_8051", Const, 11, ""},
+ {"EM_860", Const, 0, ""},
+ {"EM_88K", Const, 0, ""},
+ {"EM_960", Const, 0, ""},
+ {"EM_AARCH64", Const, 4, ""},
+ {"EM_ALPHA", Const, 0, ""},
+ {"EM_ALPHA_STD", Const, 0, ""},
+ {"EM_ALTERA_NIOS2", Const, 11, ""},
+ {"EM_AMDGPU", Const, 11, ""},
+ {"EM_ARC", Const, 0, ""},
+ {"EM_ARCA", Const, 11, ""},
+ {"EM_ARC_COMPACT", Const, 11, ""},
+ {"EM_ARC_COMPACT2", Const, 11, ""},
+ {"EM_ARM", Const, 0, ""},
+ {"EM_AVR", Const, 11, ""},
+ {"EM_AVR32", Const, 11, ""},
+ {"EM_BA1", Const, 11, ""},
+ {"EM_BA2", Const, 11, ""},
+ {"EM_BLACKFIN", Const, 11, ""},
+ {"EM_BPF", Const, 11, ""},
+ {"EM_C166", Const, 11, ""},
+ {"EM_CDP", Const, 11, ""},
+ {"EM_CE", Const, 11, ""},
+ {"EM_CLOUDSHIELD", Const, 11, ""},
+ {"EM_COGE", Const, 11, ""},
+ {"EM_COLDFIRE", Const, 0, ""},
+ {"EM_COOL", Const, 11, ""},
+ {"EM_COREA_1ST", Const, 11, ""},
+ {"EM_COREA_2ND", Const, 11, ""},
+ {"EM_CR", Const, 11, ""},
+ {"EM_CR16", Const, 11, ""},
+ {"EM_CRAYNV2", Const, 11, ""},
+ {"EM_CRIS", Const, 11, ""},
+ {"EM_CRX", Const, 11, ""},
+ {"EM_CSR_KALIMBA", Const, 11, ""},
+ {"EM_CUDA", Const, 11, ""},
+ {"EM_CYPRESS_M8C", Const, 11, ""},
+ {"EM_D10V", Const, 11, ""},
+ {"EM_D30V", Const, 11, ""},
+ {"EM_DSP24", Const, 11, ""},
+ {"EM_DSPIC30F", Const, 11, ""},
+ {"EM_DXP", Const, 11, ""},
+ {"EM_ECOG1", Const, 11, ""},
+ {"EM_ECOG16", Const, 11, ""},
+ {"EM_ECOG1X", Const, 11, ""},
+ {"EM_ECOG2", Const, 11, ""},
+ {"EM_ETPU", Const, 11, ""},
+ {"EM_EXCESS", Const, 11, ""},
+ {"EM_F2MC16", Const, 11, ""},
+ {"EM_FIREPATH", Const, 11, ""},
+ {"EM_FR20", Const, 0, ""},
+ {"EM_FR30", Const, 11, ""},
+ {"EM_FT32", Const, 11, ""},
+ {"EM_FX66", Const, 11, ""},
+ {"EM_H8S", Const, 0, ""},
+ {"EM_H8_300", Const, 0, ""},
+ {"EM_H8_300H", Const, 0, ""},
+ {"EM_H8_500", Const, 0, ""},
+ {"EM_HUANY", Const, 11, ""},
+ {"EM_IA_64", Const, 0, ""},
+ {"EM_INTEL205", Const, 11, ""},
+ {"EM_INTEL206", Const, 11, ""},
+ {"EM_INTEL207", Const, 11, ""},
+ {"EM_INTEL208", Const, 11, ""},
+ {"EM_INTEL209", Const, 11, ""},
+ {"EM_IP2K", Const, 11, ""},
+ {"EM_JAVELIN", Const, 11, ""},
+ {"EM_K10M", Const, 11, ""},
+ {"EM_KM32", Const, 11, ""},
+ {"EM_KMX16", Const, 11, ""},
+ {"EM_KMX32", Const, 11, ""},
+ {"EM_KMX8", Const, 11, ""},
+ {"EM_KVARC", Const, 11, ""},
+ {"EM_L10M", Const, 11, ""},
+ {"EM_LANAI", Const, 11, ""},
+ {"EM_LATTICEMICO32", Const, 11, ""},
+ {"EM_LOONGARCH", Const, 19, ""},
+ {"EM_M16C", Const, 11, ""},
+ {"EM_M32", Const, 0, ""},
+ {"EM_M32C", Const, 11, ""},
+ {"EM_M32R", Const, 11, ""},
+ {"EM_MANIK", Const, 11, ""},
+ {"EM_MAX", Const, 11, ""},
+ {"EM_MAXQ30", Const, 11, ""},
+ {"EM_MCHP_PIC", Const, 11, ""},
+ {"EM_MCST_ELBRUS", Const, 11, ""},
+ {"EM_ME16", Const, 0, ""},
+ {"EM_METAG", Const, 11, ""},
+ {"EM_MICROBLAZE", Const, 11, ""},
+ {"EM_MIPS", Const, 0, ""},
+ {"EM_MIPS_RS3_LE", Const, 0, ""},
+ {"EM_MIPS_RS4_BE", Const, 0, ""},
+ {"EM_MIPS_X", Const, 0, ""},
+ {"EM_MMA", Const, 0, ""},
+ {"EM_MMDSP_PLUS", Const, 11, ""},
+ {"EM_MMIX", Const, 11, ""},
+ {"EM_MN10200", Const, 11, ""},
+ {"EM_MN10300", Const, 11, ""},
+ {"EM_MOXIE", Const, 11, ""},
+ {"EM_MSP430", Const, 11, ""},
+ {"EM_NCPU", Const, 0, ""},
+ {"EM_NDR1", Const, 0, ""},
+ {"EM_NDS32", Const, 11, ""},
+ {"EM_NONE", Const, 0, ""},
+ {"EM_NORC", Const, 11, ""},
+ {"EM_NS32K", Const, 11, ""},
+ {"EM_OPEN8", Const, 11, ""},
+ {"EM_OPENRISC", Const, 11, ""},
+ {"EM_PARISC", Const, 0, ""},
+ {"EM_PCP", Const, 0, ""},
+ {"EM_PDP10", Const, 11, ""},
+ {"EM_PDP11", Const, 11, ""},
+ {"EM_PDSP", Const, 11, ""},
+ {"EM_PJ", Const, 11, ""},
+ {"EM_PPC", Const, 0, ""},
+ {"EM_PPC64", Const, 0, ""},
+ {"EM_PRISM", Const, 11, ""},
+ {"EM_QDSP6", Const, 11, ""},
+ {"EM_R32C", Const, 11, ""},
+ {"EM_RCE", Const, 0, ""},
+ {"EM_RH32", Const, 0, ""},
+ {"EM_RISCV", Const, 11, ""},
+ {"EM_RL78", Const, 11, ""},
+ {"EM_RS08", Const, 11, ""},
+ {"EM_RX", Const, 11, ""},
+ {"EM_S370", Const, 0, ""},
+ {"EM_S390", Const, 0, ""},
+ {"EM_SCORE7", Const, 11, ""},
+ {"EM_SEP", Const, 11, ""},
+ {"EM_SE_C17", Const, 11, ""},
+ {"EM_SE_C33", Const, 11, ""},
+ {"EM_SH", Const, 0, ""},
+ {"EM_SHARC", Const, 11, ""},
+ {"EM_SLE9X", Const, 11, ""},
+ {"EM_SNP1K", Const, 11, ""},
+ {"EM_SPARC", Const, 0, ""},
+ {"EM_SPARC32PLUS", Const, 0, ""},
+ {"EM_SPARCV9", Const, 0, ""},
+ {"EM_ST100", Const, 0, ""},
+ {"EM_ST19", Const, 11, ""},
+ {"EM_ST200", Const, 11, ""},
+ {"EM_ST7", Const, 11, ""},
+ {"EM_ST9PLUS", Const, 11, ""},
+ {"EM_STARCORE", Const, 0, ""},
+ {"EM_STM8", Const, 11, ""},
+ {"EM_STXP7X", Const, 11, ""},
+ {"EM_SVX", Const, 11, ""},
+ {"EM_TILE64", Const, 11, ""},
+ {"EM_TILEGX", Const, 11, ""},
+ {"EM_TILEPRO", Const, 11, ""},
+ {"EM_TINYJ", Const, 0, ""},
+ {"EM_TI_ARP32", Const, 11, ""},
+ {"EM_TI_C2000", Const, 11, ""},
+ {"EM_TI_C5500", Const, 11, ""},
+ {"EM_TI_C6000", Const, 11, ""},
+ {"EM_TI_PRU", Const, 11, ""},
+ {"EM_TMM_GPP", Const, 11, ""},
+ {"EM_TPC", Const, 11, ""},
+ {"EM_TRICORE", Const, 0, ""},
+ {"EM_TRIMEDIA", Const, 11, ""},
+ {"EM_TSK3000", Const, 11, ""},
+ {"EM_UNICORE", Const, 11, ""},
+ {"EM_V800", Const, 0, ""},
+ {"EM_V850", Const, 11, ""},
+ {"EM_VAX", Const, 11, ""},
+ {"EM_VIDEOCORE", Const, 11, ""},
+ {"EM_VIDEOCORE3", Const, 11, ""},
+ {"EM_VIDEOCORE5", Const, 11, ""},
+ {"EM_VISIUM", Const, 11, ""},
+ {"EM_VPP500", Const, 0, ""},
+ {"EM_X86_64", Const, 0, ""},
+ {"EM_XCORE", Const, 11, ""},
+ {"EM_XGATE", Const, 11, ""},
+ {"EM_XIMO16", Const, 11, ""},
+ {"EM_XTENSA", Const, 11, ""},
+ {"EM_Z80", Const, 11, ""},
+ {"EM_ZSP", Const, 11, ""},
+ {"ET_CORE", Const, 0, ""},
+ {"ET_DYN", Const, 0, ""},
+ {"ET_EXEC", Const, 0, ""},
+ {"ET_HIOS", Const, 0, ""},
+ {"ET_HIPROC", Const, 0, ""},
+ {"ET_LOOS", Const, 0, ""},
+ {"ET_LOPROC", Const, 0, ""},
+ {"ET_NONE", Const, 0, ""},
+ {"ET_REL", Const, 0, ""},
+ {"EV_CURRENT", Const, 0, ""},
+ {"EV_NONE", Const, 0, ""},
+ {"ErrNoSymbols", Var, 4, ""},
+ {"File", Type, 0, ""},
+ {"File.FileHeader", Field, 0, ""},
+ {"File.Progs", Field, 0, ""},
+ {"File.Sections", Field, 0, ""},
+ {"FileHeader", Type, 0, ""},
+ {"FileHeader.ABIVersion", Field, 0, ""},
+ {"FileHeader.ByteOrder", Field, 0, ""},
+ {"FileHeader.Class", Field, 0, ""},
+ {"FileHeader.Data", Field, 0, ""},
+ {"FileHeader.Entry", Field, 1, ""},
+ {"FileHeader.Machine", Field, 0, ""},
+ {"FileHeader.OSABI", Field, 0, ""},
+ {"FileHeader.Type", Field, 0, ""},
+ {"FileHeader.Version", Field, 0, ""},
+ {"FormatError", Type, 0, ""},
+ {"Header32", Type, 0, ""},
+ {"Header32.Ehsize", Field, 0, ""},
+ {"Header32.Entry", Field, 0, ""},
+ {"Header32.Flags", Field, 0, ""},
+ {"Header32.Ident", Field, 0, ""},
+ {"Header32.Machine", Field, 0, ""},
+ {"Header32.Phentsize", Field, 0, ""},
+ {"Header32.Phnum", Field, 0, ""},
+ {"Header32.Phoff", Field, 0, ""},
+ {"Header32.Shentsize", Field, 0, ""},
+ {"Header32.Shnum", Field, 0, ""},
+ {"Header32.Shoff", Field, 0, ""},
+ {"Header32.Shstrndx", Field, 0, ""},
+ {"Header32.Type", Field, 0, ""},
+ {"Header32.Version", Field, 0, ""},
+ {"Header64", Type, 0, ""},
+ {"Header64.Ehsize", Field, 0, ""},
+ {"Header64.Entry", Field, 0, ""},
+ {"Header64.Flags", Field, 0, ""},
+ {"Header64.Ident", Field, 0, ""},
+ {"Header64.Machine", Field, 0, ""},
+ {"Header64.Phentsize", Field, 0, ""},
+ {"Header64.Phnum", Field, 0, ""},
+ {"Header64.Phoff", Field, 0, ""},
+ {"Header64.Shentsize", Field, 0, ""},
+ {"Header64.Shnum", Field, 0, ""},
+ {"Header64.Shoff", Field, 0, ""},
+ {"Header64.Shstrndx", Field, 0, ""},
+ {"Header64.Type", Field, 0, ""},
+ {"Header64.Version", Field, 0, ""},
+ {"ImportedSymbol", Type, 0, ""},
+ {"ImportedSymbol.Library", Field, 0, ""},
+ {"ImportedSymbol.Name", Field, 0, ""},
+ {"ImportedSymbol.Version", Field, 0, ""},
+ {"Machine", Type, 0, ""},
+ {"NT_FPREGSET", Const, 0, ""},
+ {"NT_PRPSINFO", Const, 0, ""},
+ {"NT_PRSTATUS", Const, 0, ""},
+ {"NType", Type, 0, ""},
+ {"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
+ {"OSABI", Type, 0, ""},
+ {"Open", Func, 0, "func(name string) (*File, error)"},
+ {"PF_MASKOS", Const, 0, ""},
+ {"PF_MASKPROC", Const, 0, ""},
+ {"PF_R", Const, 0, ""},
+ {"PF_W", Const, 0, ""},
+ {"PF_X", Const, 0, ""},
+ {"PT_AARCH64_ARCHEXT", Const, 16, ""},
+ {"PT_AARCH64_UNWIND", Const, 16, ""},
+ {"PT_ARM_ARCHEXT", Const, 16, ""},
+ {"PT_ARM_EXIDX", Const, 16, ""},
+ {"PT_DYNAMIC", Const, 0, ""},
+ {"PT_GNU_EH_FRAME", Const, 16, ""},
+ {"PT_GNU_MBIND_HI", Const, 16, ""},
+ {"PT_GNU_MBIND_LO", Const, 16, ""},
+ {"PT_GNU_PROPERTY", Const, 16, ""},
+ {"PT_GNU_RELRO", Const, 16, ""},
+ {"PT_GNU_STACK", Const, 16, ""},
+ {"PT_HIOS", Const, 0, ""},
+ {"PT_HIPROC", Const, 0, ""},
+ {"PT_INTERP", Const, 0, ""},
+ {"PT_LOAD", Const, 0, ""},
+ {"PT_LOOS", Const, 0, ""},
+ {"PT_LOPROC", Const, 0, ""},
+ {"PT_MIPS_ABIFLAGS", Const, 16, ""},
+ {"PT_MIPS_OPTIONS", Const, 16, ""},
+ {"PT_MIPS_REGINFO", Const, 16, ""},
+ {"PT_MIPS_RTPROC", Const, 16, ""},
+ {"PT_NOTE", Const, 0, ""},
+ {"PT_NULL", Const, 0, ""},
+ {"PT_OPENBSD_BOOTDATA", Const, 16, ""},
+ {"PT_OPENBSD_NOBTCFI", Const, 23, ""},
+ {"PT_OPENBSD_RANDOMIZE", Const, 16, ""},
+ {"PT_OPENBSD_WXNEEDED", Const, 16, ""},
+ {"PT_PAX_FLAGS", Const, 16, ""},
+ {"PT_PHDR", Const, 0, ""},
+ {"PT_RISCV_ATTRIBUTES", Const, 25, ""},
+ {"PT_S390_PGSTE", Const, 16, ""},
+ {"PT_SHLIB", Const, 0, ""},
+ {"PT_SUNWSTACK", Const, 16, ""},
+ {"PT_SUNW_EH_FRAME", Const, 16, ""},
+ {"PT_TLS", Const, 0, ""},
+ {"Prog", Type, 0, ""},
+ {"Prog.ProgHeader", Field, 0, ""},
+ {"Prog.ReaderAt", Field, 0, ""},
+ {"Prog32", Type, 0, ""},
+ {"Prog32.Align", Field, 0, ""},
+ {"Prog32.Filesz", Field, 0, ""},
+ {"Prog32.Flags", Field, 0, ""},
+ {"Prog32.Memsz", Field, 0, ""},
+ {"Prog32.Off", Field, 0, ""},
+ {"Prog32.Paddr", Field, 0, ""},
+ {"Prog32.Type", Field, 0, ""},
+ {"Prog32.Vaddr", Field, 0, ""},
+ {"Prog64", Type, 0, ""},
+ {"Prog64.Align", Field, 0, ""},
+ {"Prog64.Filesz", Field, 0, ""},
+ {"Prog64.Flags", Field, 0, ""},
+ {"Prog64.Memsz", Field, 0, ""},
+ {"Prog64.Off", Field, 0, ""},
+ {"Prog64.Paddr", Field, 0, ""},
+ {"Prog64.Type", Field, 0, ""},
+ {"Prog64.Vaddr", Field, 0, ""},
+ {"ProgFlag", Type, 0, ""},
+ {"ProgHeader", Type, 0, ""},
+ {"ProgHeader.Align", Field, 0, ""},
+ {"ProgHeader.Filesz", Field, 0, ""},
+ {"ProgHeader.Flags", Field, 0, ""},
+ {"ProgHeader.Memsz", Field, 0, ""},
+ {"ProgHeader.Off", Field, 0, ""},
+ {"ProgHeader.Paddr", Field, 0, ""},
+ {"ProgHeader.Type", Field, 0, ""},
+ {"ProgHeader.Vaddr", Field, 0, ""},
+ {"ProgType", Type, 0, ""},
+ {"R_386", Type, 0, ""},
+ {"R_386_16", Const, 10, ""},
+ {"R_386_32", Const, 0, ""},
+ {"R_386_32PLT", Const, 10, ""},
+ {"R_386_8", Const, 10, ""},
+ {"R_386_COPY", Const, 0, ""},
+ {"R_386_GLOB_DAT", Const, 0, ""},
+ {"R_386_GOT32", Const, 0, ""},
+ {"R_386_GOT32X", Const, 10, ""},
+ {"R_386_GOTOFF", Const, 0, ""},
+ {"R_386_GOTPC", Const, 0, ""},
+ {"R_386_IRELATIVE", Const, 10, ""},
+ {"R_386_JMP_SLOT", Const, 0, ""},
+ {"R_386_NONE", Const, 0, ""},
+ {"R_386_PC16", Const, 10, ""},
+ {"R_386_PC32", Const, 0, ""},
+ {"R_386_PC8", Const, 10, ""},
+ {"R_386_PLT32", Const, 0, ""},
+ {"R_386_RELATIVE", Const, 0, ""},
+ {"R_386_SIZE32", Const, 10, ""},
+ {"R_386_TLS_DESC", Const, 10, ""},
+ {"R_386_TLS_DESC_CALL", Const, 10, ""},
+ {"R_386_TLS_DTPMOD32", Const, 0, ""},
+ {"R_386_TLS_DTPOFF32", Const, 0, ""},
+ {"R_386_TLS_GD", Const, 0, ""},
+ {"R_386_TLS_GD_32", Const, 0, ""},
+ {"R_386_TLS_GD_CALL", Const, 0, ""},
+ {"R_386_TLS_GD_POP", Const, 0, ""},
+ {"R_386_TLS_GD_PUSH", Const, 0, ""},
+ {"R_386_TLS_GOTDESC", Const, 10, ""},
+ {"R_386_TLS_GOTIE", Const, 0, ""},
+ {"R_386_TLS_IE", Const, 0, ""},
+ {"R_386_TLS_IE_32", Const, 0, ""},
+ {"R_386_TLS_LDM", Const, 0, ""},
+ {"R_386_TLS_LDM_32", Const, 0, ""},
+ {"R_386_TLS_LDM_CALL", Const, 0, ""},
+ {"R_386_TLS_LDM_POP", Const, 0, ""},
+ {"R_386_TLS_LDM_PUSH", Const, 0, ""},
+ {"R_386_TLS_LDO_32", Const, 0, ""},
+ {"R_386_TLS_LE", Const, 0, ""},
+ {"R_386_TLS_LE_32", Const, 0, ""},
+ {"R_386_TLS_TPOFF", Const, 0, ""},
+ {"R_386_TLS_TPOFF32", Const, 0, ""},
+ {"R_390", Type, 7, ""},
+ {"R_390_12", Const, 7, ""},
+ {"R_390_16", Const, 7, ""},
+ {"R_390_20", Const, 7, ""},
+ {"R_390_32", Const, 7, ""},
+ {"R_390_64", Const, 7, ""},
+ {"R_390_8", Const, 7, ""},
+ {"R_390_COPY", Const, 7, ""},
+ {"R_390_GLOB_DAT", Const, 7, ""},
+ {"R_390_GOT12", Const, 7, ""},
+ {"R_390_GOT16", Const, 7, ""},
+ {"R_390_GOT20", Const, 7, ""},
+ {"R_390_GOT32", Const, 7, ""},
+ {"R_390_GOT64", Const, 7, ""},
+ {"R_390_GOTENT", Const, 7, ""},
+ {"R_390_GOTOFF", Const, 7, ""},
+ {"R_390_GOTOFF16", Const, 7, ""},
+ {"R_390_GOTOFF64", Const, 7, ""},
+ {"R_390_GOTPC", Const, 7, ""},
+ {"R_390_GOTPCDBL", Const, 7, ""},
+ {"R_390_GOTPLT12", Const, 7, ""},
+ {"R_390_GOTPLT16", Const, 7, ""},
+ {"R_390_GOTPLT20", Const, 7, ""},
+ {"R_390_GOTPLT32", Const, 7, ""},
+ {"R_390_GOTPLT64", Const, 7, ""},
+ {"R_390_GOTPLTENT", Const, 7, ""},
+ {"R_390_GOTPLTOFF16", Const, 7, ""},
+ {"R_390_GOTPLTOFF32", Const, 7, ""},
+ {"R_390_GOTPLTOFF64", Const, 7, ""},
+ {"R_390_JMP_SLOT", Const, 7, ""},
+ {"R_390_NONE", Const, 7, ""},
+ {"R_390_PC16", Const, 7, ""},
+ {"R_390_PC16DBL", Const, 7, ""},
+ {"R_390_PC32", Const, 7, ""},
+ {"R_390_PC32DBL", Const, 7, ""},
+ {"R_390_PC64", Const, 7, ""},
+ {"R_390_PLT16DBL", Const, 7, ""},
+ {"R_390_PLT32", Const, 7, ""},
+ {"R_390_PLT32DBL", Const, 7, ""},
+ {"R_390_PLT64", Const, 7, ""},
+ {"R_390_RELATIVE", Const, 7, ""},
+ {"R_390_TLS_DTPMOD", Const, 7, ""},
+ {"R_390_TLS_DTPOFF", Const, 7, ""},
+ {"R_390_TLS_GD32", Const, 7, ""},
+ {"R_390_TLS_GD64", Const, 7, ""},
+ {"R_390_TLS_GDCALL", Const, 7, ""},
+ {"R_390_TLS_GOTIE12", Const, 7, ""},
+ {"R_390_TLS_GOTIE20", Const, 7, ""},
+ {"R_390_TLS_GOTIE32", Const, 7, ""},
+ {"R_390_TLS_GOTIE64", Const, 7, ""},
+ {"R_390_TLS_IE32", Const, 7, ""},
+ {"R_390_TLS_IE64", Const, 7, ""},
+ {"R_390_TLS_IEENT", Const, 7, ""},
+ {"R_390_TLS_LDCALL", Const, 7, ""},
+ {"R_390_TLS_LDM32", Const, 7, ""},
+ {"R_390_TLS_LDM64", Const, 7, ""},
+ {"R_390_TLS_LDO32", Const, 7, ""},
+ {"R_390_TLS_LDO64", Const, 7, ""},
+ {"R_390_TLS_LE32", Const, 7, ""},
+ {"R_390_TLS_LE64", Const, 7, ""},
+ {"R_390_TLS_LOAD", Const, 7, ""},
+ {"R_390_TLS_TPOFF", Const, 7, ""},
+ {"R_AARCH64", Type, 4, ""},
+ {"R_AARCH64_ABS16", Const, 4, ""},
+ {"R_AARCH64_ABS32", Const, 4, ""},
+ {"R_AARCH64_ABS64", Const, 4, ""},
+ {"R_AARCH64_ADD_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_ADR_GOT_PAGE", Const, 4, ""},
+ {"R_AARCH64_ADR_PREL_LO21", Const, 4, ""},
+ {"R_AARCH64_ADR_PREL_PG_HI21", Const, 4, ""},
+ {"R_AARCH64_ADR_PREL_PG_HI21_NC", Const, 4, ""},
+ {"R_AARCH64_CALL26", Const, 4, ""},
+ {"R_AARCH64_CONDBR19", Const, 4, ""},
+ {"R_AARCH64_COPY", Const, 4, ""},
+ {"R_AARCH64_GLOB_DAT", Const, 4, ""},
+ {"R_AARCH64_GOT_LD_PREL19", Const, 4, ""},
+ {"R_AARCH64_IRELATIVE", Const, 4, ""},
+ {"R_AARCH64_JUMP26", Const, 4, ""},
+ {"R_AARCH64_JUMP_SLOT", Const, 4, ""},
+ {"R_AARCH64_LD64_GOTOFF_LO15", Const, 10, ""},
+ {"R_AARCH64_LD64_GOTPAGE_LO15", Const, 10, ""},
+ {"R_AARCH64_LD64_GOT_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_LDST128_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_LDST16_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_LDST32_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_LDST64_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_LDST8_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_LD_PREL_LO19", Const, 4, ""},
+ {"R_AARCH64_MOVW_SABS_G0", Const, 4, ""},
+ {"R_AARCH64_MOVW_SABS_G1", Const, 4, ""},
+ {"R_AARCH64_MOVW_SABS_G2", Const, 4, ""},
+ {"R_AARCH64_MOVW_UABS_G0", Const, 4, ""},
+ {"R_AARCH64_MOVW_UABS_G0_NC", Const, 4, ""},
+ {"R_AARCH64_MOVW_UABS_G1", Const, 4, ""},
+ {"R_AARCH64_MOVW_UABS_G1_NC", Const, 4, ""},
+ {"R_AARCH64_MOVW_UABS_G2", Const, 4, ""},
+ {"R_AARCH64_MOVW_UABS_G2_NC", Const, 4, ""},
+ {"R_AARCH64_MOVW_UABS_G3", Const, 4, ""},
+ {"R_AARCH64_NONE", Const, 4, ""},
+ {"R_AARCH64_NULL", Const, 4, ""},
+ {"R_AARCH64_P32_ABS16", Const, 4, ""},
+ {"R_AARCH64_P32_ABS32", Const, 4, ""},
+ {"R_AARCH64_P32_ADD_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_ADR_GOT_PAGE", Const, 4, ""},
+ {"R_AARCH64_P32_ADR_PREL_LO21", Const, 4, ""},
+ {"R_AARCH64_P32_ADR_PREL_PG_HI21", Const, 4, ""},
+ {"R_AARCH64_P32_CALL26", Const, 4, ""},
+ {"R_AARCH64_P32_CONDBR19", Const, 4, ""},
+ {"R_AARCH64_P32_COPY", Const, 4, ""},
+ {"R_AARCH64_P32_GLOB_DAT", Const, 4, ""},
+ {"R_AARCH64_P32_GOT_LD_PREL19", Const, 4, ""},
+ {"R_AARCH64_P32_IRELATIVE", Const, 4, ""},
+ {"R_AARCH64_P32_JUMP26", Const, 4, ""},
+ {"R_AARCH64_P32_JUMP_SLOT", Const, 4, ""},
+ {"R_AARCH64_P32_LD32_GOT_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_LDST128_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_LDST16_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_LDST32_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_LDST64_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_LDST8_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_LD_PREL_LO19", Const, 4, ""},
+ {"R_AARCH64_P32_MOVW_SABS_G0", Const, 4, ""},
+ {"R_AARCH64_P32_MOVW_UABS_G0", Const, 4, ""},
+ {"R_AARCH64_P32_MOVW_UABS_G0_NC", Const, 4, ""},
+ {"R_AARCH64_P32_MOVW_UABS_G1", Const, 4, ""},
+ {"R_AARCH64_P32_PREL16", Const, 4, ""},
+ {"R_AARCH64_P32_PREL32", Const, 4, ""},
+ {"R_AARCH64_P32_RELATIVE", Const, 4, ""},
+ {"R_AARCH64_P32_TLSDESC", Const, 4, ""},
+ {"R_AARCH64_P32_TLSDESC_ADD_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_TLSDESC_ADR_PAGE21", Const, 4, ""},
+ {"R_AARCH64_P32_TLSDESC_ADR_PREL21", Const, 4, ""},
+ {"R_AARCH64_P32_TLSDESC_CALL", Const, 4, ""},
+ {"R_AARCH64_P32_TLSDESC_LD32_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_TLSDESC_LD_PREL19", Const, 4, ""},
+ {"R_AARCH64_P32_TLSGD_ADD_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_TLSGD_ADR_PAGE21", Const, 4, ""},
+ {"R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4, ""},
+ {"R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", Const, 4, ""},
+ {"R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", Const, 4, ""},
+ {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", Const, 4, ""},
+ {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", Const, 4, ""},
+ {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", Const, 4, ""},
+ {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", Const, 4, ""},
+ {"R_AARCH64_P32_TLS_DTPMOD", Const, 4, ""},
+ {"R_AARCH64_P32_TLS_DTPREL", Const, 4, ""},
+ {"R_AARCH64_P32_TLS_TPREL", Const, 4, ""},
+ {"R_AARCH64_P32_TSTBR14", Const, 4, ""},
+ {"R_AARCH64_PREL16", Const, 4, ""},
+ {"R_AARCH64_PREL32", Const, 4, ""},
+ {"R_AARCH64_PREL64", Const, 4, ""},
+ {"R_AARCH64_RELATIVE", Const, 4, ""},
+ {"R_AARCH64_TLSDESC", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_ADD", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_ADD_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_ADR_PAGE21", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_ADR_PREL21", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_CALL", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_LD64_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_LDR", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_LD_PREL19", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_OFF_G0_NC", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_OFF_G1", Const, 4, ""},
+ {"R_AARCH64_TLSGD_ADD_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_TLSGD_ADR_PAGE21", Const, 4, ""},
+ {"R_AARCH64_TLSGD_ADR_PREL21", Const, 10, ""},
+ {"R_AARCH64_TLSGD_MOVW_G0_NC", Const, 10, ""},
+ {"R_AARCH64_TLSGD_MOVW_G1", Const, 10, ""},
+ {"R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4, ""},
+ {"R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", Const, 4, ""},
+ {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", Const, 4, ""},
+ {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", Const, 4, ""},
+ {"R_AARCH64_TLSLD_ADR_PAGE21", Const, 10, ""},
+ {"R_AARCH64_TLSLD_ADR_PREL21", Const, 10, ""},
+ {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12", Const, 10, ""},
+ {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", Const, 10, ""},
+ {"R_AARCH64_TLSLE_ADD_TPREL_HI12", Const, 4, ""},
+ {"R_AARCH64_TLSLE_ADD_TPREL_LO12", Const, 4, ""},
+ {"R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_TLSLE_LDST128_TPREL_LO12", Const, 10, ""},
+ {"R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", Const, 10, ""},
+ {"R_AARCH64_TLSLE_MOVW_TPREL_G0", Const, 4, ""},
+ {"R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", Const, 4, ""},
+ {"R_AARCH64_TLSLE_MOVW_TPREL_G1", Const, 4, ""},
+ {"R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", Const, 4, ""},
+ {"R_AARCH64_TLSLE_MOVW_TPREL_G2", Const, 4, ""},
+ {"R_AARCH64_TLS_DTPMOD64", Const, 4, ""},
+ {"R_AARCH64_TLS_DTPREL64", Const, 4, ""},
+ {"R_AARCH64_TLS_TPREL64", Const, 4, ""},
+ {"R_AARCH64_TSTBR14", Const, 4, ""},
+ {"R_ALPHA", Type, 0, ""},
+ {"R_ALPHA_BRADDR", Const, 0, ""},
+ {"R_ALPHA_COPY", Const, 0, ""},
+ {"R_ALPHA_GLOB_DAT", Const, 0, ""},
+ {"R_ALPHA_GPDISP", Const, 0, ""},
+ {"R_ALPHA_GPREL32", Const, 0, ""},
+ {"R_ALPHA_GPRELHIGH", Const, 0, ""},
+ {"R_ALPHA_GPRELLOW", Const, 0, ""},
+ {"R_ALPHA_GPVALUE", Const, 0, ""},
+ {"R_ALPHA_HINT", Const, 0, ""},
+ {"R_ALPHA_IMMED_BR_HI32", Const, 0, ""},
+ {"R_ALPHA_IMMED_GP_16", Const, 0, ""},
+ {"R_ALPHA_IMMED_GP_HI32", Const, 0, ""},
+ {"R_ALPHA_IMMED_LO32", Const, 0, ""},
+ {"R_ALPHA_IMMED_SCN_HI32", Const, 0, ""},
+ {"R_ALPHA_JMP_SLOT", Const, 0, ""},
+ {"R_ALPHA_LITERAL", Const, 0, ""},
+ {"R_ALPHA_LITUSE", Const, 0, ""},
+ {"R_ALPHA_NONE", Const, 0, ""},
+ {"R_ALPHA_OP_PRSHIFT", Const, 0, ""},
+ {"R_ALPHA_OP_PSUB", Const, 0, ""},
+ {"R_ALPHA_OP_PUSH", Const, 0, ""},
+ {"R_ALPHA_OP_STORE", Const, 0, ""},
+ {"R_ALPHA_REFLONG", Const, 0, ""},
+ {"R_ALPHA_REFQUAD", Const, 0, ""},
+ {"R_ALPHA_RELATIVE", Const, 0, ""},
+ {"R_ALPHA_SREL16", Const, 0, ""},
+ {"R_ALPHA_SREL32", Const, 0, ""},
+ {"R_ALPHA_SREL64", Const, 0, ""},
+ {"R_ARM", Type, 0, ""},
+ {"R_ARM_ABS12", Const, 0, ""},
+ {"R_ARM_ABS16", Const, 0, ""},
+ {"R_ARM_ABS32", Const, 0, ""},
+ {"R_ARM_ABS32_NOI", Const, 10, ""},
+ {"R_ARM_ABS8", Const, 0, ""},
+ {"R_ARM_ALU_PCREL_15_8", Const, 10, ""},
+ {"R_ARM_ALU_PCREL_23_15", Const, 10, ""},
+ {"R_ARM_ALU_PCREL_7_0", Const, 10, ""},
+ {"R_ARM_ALU_PC_G0", Const, 10, ""},
+ {"R_ARM_ALU_PC_G0_NC", Const, 10, ""},
+ {"R_ARM_ALU_PC_G1", Const, 10, ""},
+ {"R_ARM_ALU_PC_G1_NC", Const, 10, ""},
+ {"R_ARM_ALU_PC_G2", Const, 10, ""},
+ {"R_ARM_ALU_SBREL_19_12_NC", Const, 10, ""},
+ {"R_ARM_ALU_SBREL_27_20_CK", Const, 10, ""},
+ {"R_ARM_ALU_SB_G0", Const, 10, ""},
+ {"R_ARM_ALU_SB_G0_NC", Const, 10, ""},
+ {"R_ARM_ALU_SB_G1", Const, 10, ""},
+ {"R_ARM_ALU_SB_G1_NC", Const, 10, ""},
+ {"R_ARM_ALU_SB_G2", Const, 10, ""},
+ {"R_ARM_AMP_VCALL9", Const, 0, ""},
+ {"R_ARM_BASE_ABS", Const, 10, ""},
+ {"R_ARM_CALL", Const, 10, ""},
+ {"R_ARM_COPY", Const, 0, ""},
+ {"R_ARM_GLOB_DAT", Const, 0, ""},
+ {"R_ARM_GNU_VTENTRY", Const, 0, ""},
+ {"R_ARM_GNU_VTINHERIT", Const, 0, ""},
+ {"R_ARM_GOT32", Const, 0, ""},
+ {"R_ARM_GOTOFF", Const, 0, ""},
+ {"R_ARM_GOTOFF12", Const, 10, ""},
+ {"R_ARM_GOTPC", Const, 0, ""},
+ {"R_ARM_GOTRELAX", Const, 10, ""},
+ {"R_ARM_GOT_ABS", Const, 10, ""},
+ {"R_ARM_GOT_BREL12", Const, 10, ""},
+ {"R_ARM_GOT_PREL", Const, 10, ""},
+ {"R_ARM_IRELATIVE", Const, 10, ""},
+ {"R_ARM_JUMP24", Const, 10, ""},
+ {"R_ARM_JUMP_SLOT", Const, 0, ""},
+ {"R_ARM_LDC_PC_G0", Const, 10, ""},
+ {"R_ARM_LDC_PC_G1", Const, 10, ""},
+ {"R_ARM_LDC_PC_G2", Const, 10, ""},
+ {"R_ARM_LDC_SB_G0", Const, 10, ""},
+ {"R_ARM_LDC_SB_G1", Const, 10, ""},
+ {"R_ARM_LDC_SB_G2", Const, 10, ""},
+ {"R_ARM_LDRS_PC_G0", Const, 10, ""},
+ {"R_ARM_LDRS_PC_G1", Const, 10, ""},
+ {"R_ARM_LDRS_PC_G2", Const, 10, ""},
+ {"R_ARM_LDRS_SB_G0", Const, 10, ""},
+ {"R_ARM_LDRS_SB_G1", Const, 10, ""},
+ {"R_ARM_LDRS_SB_G2", Const, 10, ""},
+ {"R_ARM_LDR_PC_G1", Const, 10, ""},
+ {"R_ARM_LDR_PC_G2", Const, 10, ""},
+ {"R_ARM_LDR_SBREL_11_10_NC", Const, 10, ""},
+ {"R_ARM_LDR_SB_G0", Const, 10, ""},
+ {"R_ARM_LDR_SB_G1", Const, 10, ""},
+ {"R_ARM_LDR_SB_G2", Const, 10, ""},
+ {"R_ARM_ME_TOO", Const, 10, ""},
+ {"R_ARM_MOVT_ABS", Const, 10, ""},
+ {"R_ARM_MOVT_BREL", Const, 10, ""},
+ {"R_ARM_MOVT_PREL", Const, 10, ""},
+ {"R_ARM_MOVW_ABS_NC", Const, 10, ""},
+ {"R_ARM_MOVW_BREL", Const, 10, ""},
+ {"R_ARM_MOVW_BREL_NC", Const, 10, ""},
+ {"R_ARM_MOVW_PREL_NC", Const, 10, ""},
+ {"R_ARM_NONE", Const, 0, ""},
+ {"R_ARM_PC13", Const, 0, ""},
+ {"R_ARM_PC24", Const, 0, ""},
+ {"R_ARM_PLT32", Const, 0, ""},
+ {"R_ARM_PLT32_ABS", Const, 10, ""},
+ {"R_ARM_PREL31", Const, 10, ""},
+ {"R_ARM_PRIVATE_0", Const, 10, ""},
+ {"R_ARM_PRIVATE_1", Const, 10, ""},
+ {"R_ARM_PRIVATE_10", Const, 10, ""},
+ {"R_ARM_PRIVATE_11", Const, 10, ""},
+ {"R_ARM_PRIVATE_12", Const, 10, ""},
+ {"R_ARM_PRIVATE_13", Const, 10, ""},
+ {"R_ARM_PRIVATE_14", Const, 10, ""},
+ {"R_ARM_PRIVATE_15", Const, 10, ""},
+ {"R_ARM_PRIVATE_2", Const, 10, ""},
+ {"R_ARM_PRIVATE_3", Const, 10, ""},
+ {"R_ARM_PRIVATE_4", Const, 10, ""},
+ {"R_ARM_PRIVATE_5", Const, 10, ""},
+ {"R_ARM_PRIVATE_6", Const, 10, ""},
+ {"R_ARM_PRIVATE_7", Const, 10, ""},
+ {"R_ARM_PRIVATE_8", Const, 10, ""},
+ {"R_ARM_PRIVATE_9", Const, 10, ""},
+ {"R_ARM_RABS32", Const, 0, ""},
+ {"R_ARM_RBASE", Const, 0, ""},
+ {"R_ARM_REL32", Const, 0, ""},
+ {"R_ARM_REL32_NOI", Const, 10, ""},
+ {"R_ARM_RELATIVE", Const, 0, ""},
+ {"R_ARM_RPC24", Const, 0, ""},
+ {"R_ARM_RREL32", Const, 0, ""},
+ {"R_ARM_RSBREL32", Const, 0, ""},
+ {"R_ARM_RXPC25", Const, 10, ""},
+ {"R_ARM_SBREL31", Const, 10, ""},
+ {"R_ARM_SBREL32", Const, 0, ""},
+ {"R_ARM_SWI24", Const, 0, ""},
+ {"R_ARM_TARGET1", Const, 10, ""},
+ {"R_ARM_TARGET2", Const, 10, ""},
+ {"R_ARM_THM_ABS5", Const, 0, ""},
+ {"R_ARM_THM_ALU_ABS_G0_NC", Const, 10, ""},
+ {"R_ARM_THM_ALU_ABS_G1_NC", Const, 10, ""},
+ {"R_ARM_THM_ALU_ABS_G2_NC", Const, 10, ""},
+ {"R_ARM_THM_ALU_ABS_G3", Const, 10, ""},
+ {"R_ARM_THM_ALU_PREL_11_0", Const, 10, ""},
+ {"R_ARM_THM_GOT_BREL12", Const, 10, ""},
+ {"R_ARM_THM_JUMP11", Const, 10, ""},
+ {"R_ARM_THM_JUMP19", Const, 10, ""},
+ {"R_ARM_THM_JUMP24", Const, 10, ""},
+ {"R_ARM_THM_JUMP6", Const, 10, ""},
+ {"R_ARM_THM_JUMP8", Const, 10, ""},
+ {"R_ARM_THM_MOVT_ABS", Const, 10, ""},
+ {"R_ARM_THM_MOVT_BREL", Const, 10, ""},
+ {"R_ARM_THM_MOVT_PREL", Const, 10, ""},
+ {"R_ARM_THM_MOVW_ABS_NC", Const, 10, ""},
+ {"R_ARM_THM_MOVW_BREL", Const, 10, ""},
+ {"R_ARM_THM_MOVW_BREL_NC", Const, 10, ""},
+ {"R_ARM_THM_MOVW_PREL_NC", Const, 10, ""},
+ {"R_ARM_THM_PC12", Const, 10, ""},
+ {"R_ARM_THM_PC22", Const, 0, ""},
+ {"R_ARM_THM_PC8", Const, 0, ""},
+ {"R_ARM_THM_RPC22", Const, 0, ""},
+ {"R_ARM_THM_SWI8", Const, 0, ""},
+ {"R_ARM_THM_TLS_CALL", Const, 10, ""},
+ {"R_ARM_THM_TLS_DESCSEQ16", Const, 10, ""},
+ {"R_ARM_THM_TLS_DESCSEQ32", Const, 10, ""},
+ {"R_ARM_THM_XPC22", Const, 0, ""},
+ {"R_ARM_TLS_CALL", Const, 10, ""},
+ {"R_ARM_TLS_DESCSEQ", Const, 10, ""},
+ {"R_ARM_TLS_DTPMOD32", Const, 10, ""},
+ {"R_ARM_TLS_DTPOFF32", Const, 10, ""},
+ {"R_ARM_TLS_GD32", Const, 10, ""},
+ {"R_ARM_TLS_GOTDESC", Const, 10, ""},
+ {"R_ARM_TLS_IE12GP", Const, 10, ""},
+ {"R_ARM_TLS_IE32", Const, 10, ""},
+ {"R_ARM_TLS_LDM32", Const, 10, ""},
+ {"R_ARM_TLS_LDO12", Const, 10, ""},
+ {"R_ARM_TLS_LDO32", Const, 10, ""},
+ {"R_ARM_TLS_LE12", Const, 10, ""},
+ {"R_ARM_TLS_LE32", Const, 10, ""},
+ {"R_ARM_TLS_TPOFF32", Const, 10, ""},
+ {"R_ARM_V4BX", Const, 10, ""},
+ {"R_ARM_XPC25", Const, 0, ""},
+ {"R_INFO", Func, 0, "func(sym uint32, typ uint32) uint64"},
+ {"R_INFO32", Func, 0, "func(sym uint32, typ uint32) uint32"},
+ {"R_LARCH", Type, 19, ""},
+ {"R_LARCH_32", Const, 19, ""},
+ {"R_LARCH_32_PCREL", Const, 20, ""},
+ {"R_LARCH_64", Const, 19, ""},
+ {"R_LARCH_64_PCREL", Const, 22, ""},
+ {"R_LARCH_ABS64_HI12", Const, 20, ""},
+ {"R_LARCH_ABS64_LO20", Const, 20, ""},
+ {"R_LARCH_ABS_HI20", Const, 20, ""},
+ {"R_LARCH_ABS_LO12", Const, 20, ""},
+ {"R_LARCH_ADD16", Const, 19, ""},
+ {"R_LARCH_ADD24", Const, 19, ""},
+ {"R_LARCH_ADD32", Const, 19, ""},
+ {"R_LARCH_ADD6", Const, 22, ""},
+ {"R_LARCH_ADD64", Const, 19, ""},
+ {"R_LARCH_ADD8", Const, 19, ""},
+ {"R_LARCH_ADD_ULEB128", Const, 22, ""},
+ {"R_LARCH_ALIGN", Const, 22, ""},
+ {"R_LARCH_B16", Const, 20, ""},
+ {"R_LARCH_B21", Const, 20, ""},
+ {"R_LARCH_B26", Const, 20, ""},
+ {"R_LARCH_CFA", Const, 22, ""},
+ {"R_LARCH_COPY", Const, 19, ""},
+ {"R_LARCH_DELETE", Const, 22, ""},
+ {"R_LARCH_GNU_VTENTRY", Const, 20, ""},
+ {"R_LARCH_GNU_VTINHERIT", Const, 20, ""},
+ {"R_LARCH_GOT64_HI12", Const, 20, ""},
+ {"R_LARCH_GOT64_LO20", Const, 20, ""},
+ {"R_LARCH_GOT64_PC_HI12", Const, 20, ""},
+ {"R_LARCH_GOT64_PC_LO20", Const, 20, ""},
+ {"R_LARCH_GOT_HI20", Const, 20, ""},
+ {"R_LARCH_GOT_LO12", Const, 20, ""},
+ {"R_LARCH_GOT_PC_HI20", Const, 20, ""},
+ {"R_LARCH_GOT_PC_LO12", Const, 20, ""},
+ {"R_LARCH_IRELATIVE", Const, 19, ""},
+ {"R_LARCH_JUMP_SLOT", Const, 19, ""},
+ {"R_LARCH_MARK_LA", Const, 19, ""},
+ {"R_LARCH_MARK_PCREL", Const, 19, ""},
+ {"R_LARCH_NONE", Const, 19, ""},
+ {"R_LARCH_PCALA64_HI12", Const, 20, ""},
+ {"R_LARCH_PCALA64_LO20", Const, 20, ""},
+ {"R_LARCH_PCALA_HI20", Const, 20, ""},
+ {"R_LARCH_PCALA_LO12", Const, 20, ""},
+ {"R_LARCH_PCREL20_S2", Const, 22, ""},
+ {"R_LARCH_RELATIVE", Const, 19, ""},
+ {"R_LARCH_RELAX", Const, 20, ""},
+ {"R_LARCH_SOP_ADD", Const, 19, ""},
+ {"R_LARCH_SOP_AND", Const, 19, ""},
+ {"R_LARCH_SOP_ASSERT", Const, 19, ""},
+ {"R_LARCH_SOP_IF_ELSE", Const, 19, ""},
+ {"R_LARCH_SOP_NOT", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_S_0_10_10_16_S2", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_S_0_5_10_16_S2", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_S_10_12", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_S_10_16", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_S_10_16_S2", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_S_10_5", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_S_5_20", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_U", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_U_10_12", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_ABSOLUTE", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_DUP", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_GPREL", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_PCREL", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_PLT_PCREL", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_TLS_GD", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_TLS_GOT", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_TLS_TPREL", Const, 19, ""},
+ {"R_LARCH_SOP_SL", Const, 19, ""},
+ {"R_LARCH_SOP_SR", Const, 19, ""},
+ {"R_LARCH_SOP_SUB", Const, 19, ""},
+ {"R_LARCH_SUB16", Const, 19, ""},
+ {"R_LARCH_SUB24", Const, 19, ""},
+ {"R_LARCH_SUB32", Const, 19, ""},
+ {"R_LARCH_SUB6", Const, 22, ""},
+ {"R_LARCH_SUB64", Const, 19, ""},
+ {"R_LARCH_SUB8", Const, 19, ""},
+ {"R_LARCH_SUB_ULEB128", Const, 22, ""},
+ {"R_LARCH_TLS_DTPMOD32", Const, 19, ""},
+ {"R_LARCH_TLS_DTPMOD64", Const, 19, ""},
+ {"R_LARCH_TLS_DTPREL32", Const, 19, ""},
+ {"R_LARCH_TLS_DTPREL64", Const, 19, ""},
+ {"R_LARCH_TLS_GD_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_GD_PC_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_IE64_HI12", Const, 20, ""},
+ {"R_LARCH_TLS_IE64_LO20", Const, 20, ""},
+ {"R_LARCH_TLS_IE64_PC_HI12", Const, 20, ""},
+ {"R_LARCH_TLS_IE64_PC_LO20", Const, 20, ""},
+ {"R_LARCH_TLS_IE_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_IE_LO12", Const, 20, ""},
+ {"R_LARCH_TLS_IE_PC_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_IE_PC_LO12", Const, 20, ""},
+ {"R_LARCH_TLS_LD_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_LD_PC_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_LE64_HI12", Const, 20, ""},
+ {"R_LARCH_TLS_LE64_LO20", Const, 20, ""},
+ {"R_LARCH_TLS_LE_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_LE_LO12", Const, 20, ""},
+ {"R_LARCH_TLS_TPREL32", Const, 19, ""},
+ {"R_LARCH_TLS_TPREL64", Const, 19, ""},
+ {"R_MIPS", Type, 6, ""},
+ {"R_MIPS_16", Const, 6, ""},
+ {"R_MIPS_26", Const, 6, ""},
+ {"R_MIPS_32", Const, 6, ""},
+ {"R_MIPS_64", Const, 6, ""},
+ {"R_MIPS_ADD_IMMEDIATE", Const, 6, ""},
+ {"R_MIPS_CALL16", Const, 6, ""},
+ {"R_MIPS_CALL_HI16", Const, 6, ""},
+ {"R_MIPS_CALL_LO16", Const, 6, ""},
+ {"R_MIPS_DELETE", Const, 6, ""},
+ {"R_MIPS_GOT16", Const, 6, ""},
+ {"R_MIPS_GOT_DISP", Const, 6, ""},
+ {"R_MIPS_GOT_HI16", Const, 6, ""},
+ {"R_MIPS_GOT_LO16", Const, 6, ""},
+ {"R_MIPS_GOT_OFST", Const, 6, ""},
+ {"R_MIPS_GOT_PAGE", Const, 6, ""},
+ {"R_MIPS_GPREL16", Const, 6, ""},
+ {"R_MIPS_GPREL32", Const, 6, ""},
+ {"R_MIPS_HI16", Const, 6, ""},
+ {"R_MIPS_HIGHER", Const, 6, ""},
+ {"R_MIPS_HIGHEST", Const, 6, ""},
+ {"R_MIPS_INSERT_A", Const, 6, ""},
+ {"R_MIPS_INSERT_B", Const, 6, ""},
+ {"R_MIPS_JALR", Const, 6, ""},
+ {"R_MIPS_LITERAL", Const, 6, ""},
+ {"R_MIPS_LO16", Const, 6, ""},
+ {"R_MIPS_NONE", Const, 6, ""},
+ {"R_MIPS_PC16", Const, 6, ""},
+ {"R_MIPS_PC32", Const, 22, ""},
+ {"R_MIPS_PJUMP", Const, 6, ""},
+ {"R_MIPS_REL16", Const, 6, ""},
+ {"R_MIPS_REL32", Const, 6, ""},
+ {"R_MIPS_RELGOT", Const, 6, ""},
+ {"R_MIPS_SCN_DISP", Const, 6, ""},
+ {"R_MIPS_SHIFT5", Const, 6, ""},
+ {"R_MIPS_SHIFT6", Const, 6, ""},
+ {"R_MIPS_SUB", Const, 6, ""},
+ {"R_MIPS_TLS_DTPMOD32", Const, 6, ""},
+ {"R_MIPS_TLS_DTPMOD64", Const, 6, ""},
+ {"R_MIPS_TLS_DTPREL32", Const, 6, ""},
+ {"R_MIPS_TLS_DTPREL64", Const, 6, ""},
+ {"R_MIPS_TLS_DTPREL_HI16", Const, 6, ""},
+ {"R_MIPS_TLS_DTPREL_LO16", Const, 6, ""},
+ {"R_MIPS_TLS_GD", Const, 6, ""},
+ {"R_MIPS_TLS_GOTTPREL", Const, 6, ""},
+ {"R_MIPS_TLS_LDM", Const, 6, ""},
+ {"R_MIPS_TLS_TPREL32", Const, 6, ""},
+ {"R_MIPS_TLS_TPREL64", Const, 6, ""},
+ {"R_MIPS_TLS_TPREL_HI16", Const, 6, ""},
+ {"R_MIPS_TLS_TPREL_LO16", Const, 6, ""},
+ {"R_PPC", Type, 0, ""},
+ {"R_PPC64", Type, 5, ""},
+ {"R_PPC64_ADDR14", Const, 5, ""},
+ {"R_PPC64_ADDR14_BRNTAKEN", Const, 5, ""},
+ {"R_PPC64_ADDR14_BRTAKEN", Const, 5, ""},
+ {"R_PPC64_ADDR16", Const, 5, ""},
+ {"R_PPC64_ADDR16_DS", Const, 5, ""},
+ {"R_PPC64_ADDR16_HA", Const, 5, ""},
+ {"R_PPC64_ADDR16_HI", Const, 5, ""},
+ {"R_PPC64_ADDR16_HIGH", Const, 10, ""},
+ {"R_PPC64_ADDR16_HIGHA", Const, 10, ""},
+ {"R_PPC64_ADDR16_HIGHER", Const, 5, ""},
+ {"R_PPC64_ADDR16_HIGHER34", Const, 20, ""},
+ {"R_PPC64_ADDR16_HIGHERA", Const, 5, ""},
+ {"R_PPC64_ADDR16_HIGHERA34", Const, 20, ""},
+ {"R_PPC64_ADDR16_HIGHEST", Const, 5, ""},
+ {"R_PPC64_ADDR16_HIGHEST34", Const, 20, ""},
+ {"R_PPC64_ADDR16_HIGHESTA", Const, 5, ""},
+ {"R_PPC64_ADDR16_HIGHESTA34", Const, 20, ""},
+ {"R_PPC64_ADDR16_LO", Const, 5, ""},
+ {"R_PPC64_ADDR16_LO_DS", Const, 5, ""},
+ {"R_PPC64_ADDR24", Const, 5, ""},
+ {"R_PPC64_ADDR32", Const, 5, ""},
+ {"R_PPC64_ADDR64", Const, 5, ""},
+ {"R_PPC64_ADDR64_LOCAL", Const, 10, ""},
+ {"R_PPC64_COPY", Const, 20, ""},
+ {"R_PPC64_D28", Const, 20, ""},
+ {"R_PPC64_D34", Const, 20, ""},
+ {"R_PPC64_D34_HA30", Const, 20, ""},
+ {"R_PPC64_D34_HI30", Const, 20, ""},
+ {"R_PPC64_D34_LO", Const, 20, ""},
+ {"R_PPC64_DTPMOD64", Const, 5, ""},
+ {"R_PPC64_DTPREL16", Const, 5, ""},
+ {"R_PPC64_DTPREL16_DS", Const, 5, ""},
+ {"R_PPC64_DTPREL16_HA", Const, 5, ""},
+ {"R_PPC64_DTPREL16_HI", Const, 5, ""},
+ {"R_PPC64_DTPREL16_HIGH", Const, 10, ""},
+ {"R_PPC64_DTPREL16_HIGHA", Const, 10, ""},
+ {"R_PPC64_DTPREL16_HIGHER", Const, 5, ""},
+ {"R_PPC64_DTPREL16_HIGHERA", Const, 5, ""},
+ {"R_PPC64_DTPREL16_HIGHEST", Const, 5, ""},
+ {"R_PPC64_DTPREL16_HIGHESTA", Const, 5, ""},
+ {"R_PPC64_DTPREL16_LO", Const, 5, ""},
+ {"R_PPC64_DTPREL16_LO_DS", Const, 5, ""},
+ {"R_PPC64_DTPREL34", Const, 20, ""},
+ {"R_PPC64_DTPREL64", Const, 5, ""},
+ {"R_PPC64_ENTRY", Const, 10, ""},
+ {"R_PPC64_GLOB_DAT", Const, 20, ""},
+ {"R_PPC64_GNU_VTENTRY", Const, 20, ""},
+ {"R_PPC64_GNU_VTINHERIT", Const, 20, ""},
+ {"R_PPC64_GOT16", Const, 5, ""},
+ {"R_PPC64_GOT16_DS", Const, 5, ""},
+ {"R_PPC64_GOT16_HA", Const, 5, ""},
+ {"R_PPC64_GOT16_HI", Const, 5, ""},
+ {"R_PPC64_GOT16_LO", Const, 5, ""},
+ {"R_PPC64_GOT16_LO_DS", Const, 5, ""},
+ {"R_PPC64_GOT_DTPREL16_DS", Const, 5, ""},
+ {"R_PPC64_GOT_DTPREL16_HA", Const, 5, ""},
+ {"R_PPC64_GOT_DTPREL16_HI", Const, 5, ""},
+ {"R_PPC64_GOT_DTPREL16_LO_DS", Const, 5, ""},
+ {"R_PPC64_GOT_DTPREL_PCREL34", Const, 20, ""},
+ {"R_PPC64_GOT_PCREL34", Const, 20, ""},
+ {"R_PPC64_GOT_TLSGD16", Const, 5, ""},
+ {"R_PPC64_GOT_TLSGD16_HA", Const, 5, ""},
+ {"R_PPC64_GOT_TLSGD16_HI", Const, 5, ""},
+ {"R_PPC64_GOT_TLSGD16_LO", Const, 5, ""},
+ {"R_PPC64_GOT_TLSGD_PCREL34", Const, 20, ""},
+ {"R_PPC64_GOT_TLSLD16", Const, 5, ""},
+ {"R_PPC64_GOT_TLSLD16_HA", Const, 5, ""},
+ {"R_PPC64_GOT_TLSLD16_HI", Const, 5, ""},
+ {"R_PPC64_GOT_TLSLD16_LO", Const, 5, ""},
+ {"R_PPC64_GOT_TLSLD_PCREL34", Const, 20, ""},
+ {"R_PPC64_GOT_TPREL16_DS", Const, 5, ""},
+ {"R_PPC64_GOT_TPREL16_HA", Const, 5, ""},
+ {"R_PPC64_GOT_TPREL16_HI", Const, 5, ""},
+ {"R_PPC64_GOT_TPREL16_LO_DS", Const, 5, ""},
+ {"R_PPC64_GOT_TPREL_PCREL34", Const, 20, ""},
+ {"R_PPC64_IRELATIVE", Const, 10, ""},
+ {"R_PPC64_JMP_IREL", Const, 10, ""},
+ {"R_PPC64_JMP_SLOT", Const, 5, ""},
+ {"R_PPC64_NONE", Const, 5, ""},
+ {"R_PPC64_PCREL28", Const, 20, ""},
+ {"R_PPC64_PCREL34", Const, 20, ""},
+ {"R_PPC64_PCREL_OPT", Const, 20, ""},
+ {"R_PPC64_PLT16_HA", Const, 20, ""},
+ {"R_PPC64_PLT16_HI", Const, 20, ""},
+ {"R_PPC64_PLT16_LO", Const, 20, ""},
+ {"R_PPC64_PLT16_LO_DS", Const, 10, ""},
+ {"R_PPC64_PLT32", Const, 20, ""},
+ {"R_PPC64_PLT64", Const, 20, ""},
+ {"R_PPC64_PLTCALL", Const, 20, ""},
+ {"R_PPC64_PLTCALL_NOTOC", Const, 20, ""},
+ {"R_PPC64_PLTGOT16", Const, 10, ""},
+ {"R_PPC64_PLTGOT16_DS", Const, 10, ""},
+ {"R_PPC64_PLTGOT16_HA", Const, 10, ""},
+ {"R_PPC64_PLTGOT16_HI", Const, 10, ""},
+ {"R_PPC64_PLTGOT16_LO", Const, 10, ""},
+ {"R_PPC64_PLTGOT_LO_DS", Const, 10, ""},
+ {"R_PPC64_PLTREL32", Const, 20, ""},
+ {"R_PPC64_PLTREL64", Const, 20, ""},
+ {"R_PPC64_PLTSEQ", Const, 20, ""},
+ {"R_PPC64_PLTSEQ_NOTOC", Const, 20, ""},
+ {"R_PPC64_PLT_PCREL34", Const, 20, ""},
+ {"R_PPC64_PLT_PCREL34_NOTOC", Const, 20, ""},
+ {"R_PPC64_REL14", Const, 5, ""},
+ {"R_PPC64_REL14_BRNTAKEN", Const, 5, ""},
+ {"R_PPC64_REL14_BRTAKEN", Const, 5, ""},
+ {"R_PPC64_REL16", Const, 5, ""},
+ {"R_PPC64_REL16DX_HA", Const, 10, ""},
+ {"R_PPC64_REL16_HA", Const, 5, ""},
+ {"R_PPC64_REL16_HI", Const, 5, ""},
+ {"R_PPC64_REL16_HIGH", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHA", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHER", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHER34", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHERA", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHERA34", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHEST", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHEST34", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHESTA", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHESTA34", Const, 20, ""},
+ {"R_PPC64_REL16_LO", Const, 5, ""},
+ {"R_PPC64_REL24", Const, 5, ""},
+ {"R_PPC64_REL24_NOTOC", Const, 10, ""},
+ {"R_PPC64_REL24_P9NOTOC", Const, 21, ""},
+ {"R_PPC64_REL30", Const, 20, ""},
+ {"R_PPC64_REL32", Const, 5, ""},
+ {"R_PPC64_REL64", Const, 5, ""},
+ {"R_PPC64_RELATIVE", Const, 18, ""},
+ {"R_PPC64_SECTOFF", Const, 20, ""},
+ {"R_PPC64_SECTOFF_DS", Const, 10, ""},
+ {"R_PPC64_SECTOFF_HA", Const, 20, ""},
+ {"R_PPC64_SECTOFF_HI", Const, 20, ""},
+ {"R_PPC64_SECTOFF_LO", Const, 20, ""},
+ {"R_PPC64_SECTOFF_LO_DS", Const, 10, ""},
+ {"R_PPC64_TLS", Const, 5, ""},
+ {"R_PPC64_TLSGD", Const, 5, ""},
+ {"R_PPC64_TLSLD", Const, 5, ""},
+ {"R_PPC64_TOC", Const, 5, ""},
+ {"R_PPC64_TOC16", Const, 5, ""},
+ {"R_PPC64_TOC16_DS", Const, 5, ""},
+ {"R_PPC64_TOC16_HA", Const, 5, ""},
+ {"R_PPC64_TOC16_HI", Const, 5, ""},
+ {"R_PPC64_TOC16_LO", Const, 5, ""},
+ {"R_PPC64_TOC16_LO_DS", Const, 5, ""},
+ {"R_PPC64_TOCSAVE", Const, 10, ""},
+ {"R_PPC64_TPREL16", Const, 5, ""},
+ {"R_PPC64_TPREL16_DS", Const, 5, ""},
+ {"R_PPC64_TPREL16_HA", Const, 5, ""},
+ {"R_PPC64_TPREL16_HI", Const, 5, ""},
+ {"R_PPC64_TPREL16_HIGH", Const, 10, ""},
+ {"R_PPC64_TPREL16_HIGHA", Const, 10, ""},
+ {"R_PPC64_TPREL16_HIGHER", Const, 5, ""},
+ {"R_PPC64_TPREL16_HIGHERA", Const, 5, ""},
+ {"R_PPC64_TPREL16_HIGHEST", Const, 5, ""},
+ {"R_PPC64_TPREL16_HIGHESTA", Const, 5, ""},
+ {"R_PPC64_TPREL16_LO", Const, 5, ""},
+ {"R_PPC64_TPREL16_LO_DS", Const, 5, ""},
+ {"R_PPC64_TPREL34", Const, 20, ""},
+ {"R_PPC64_TPREL64", Const, 5, ""},
+ {"R_PPC64_UADDR16", Const, 20, ""},
+ {"R_PPC64_UADDR32", Const, 20, ""},
+ {"R_PPC64_UADDR64", Const, 20, ""},
+ {"R_PPC_ADDR14", Const, 0, ""},
+ {"R_PPC_ADDR14_BRNTAKEN", Const, 0, ""},
+ {"R_PPC_ADDR14_BRTAKEN", Const, 0, ""},
+ {"R_PPC_ADDR16", Const, 0, ""},
+ {"R_PPC_ADDR16_HA", Const, 0, ""},
+ {"R_PPC_ADDR16_HI", Const, 0, ""},
+ {"R_PPC_ADDR16_LO", Const, 0, ""},
+ {"R_PPC_ADDR24", Const, 0, ""},
+ {"R_PPC_ADDR32", Const, 0, ""},
+ {"R_PPC_COPY", Const, 0, ""},
+ {"R_PPC_DTPMOD32", Const, 0, ""},
+ {"R_PPC_DTPREL16", Const, 0, ""},
+ {"R_PPC_DTPREL16_HA", Const, 0, ""},
+ {"R_PPC_DTPREL16_HI", Const, 0, ""},
+ {"R_PPC_DTPREL16_LO", Const, 0, ""},
+ {"R_PPC_DTPREL32", Const, 0, ""},
+ {"R_PPC_EMB_BIT_FLD", Const, 0, ""},
+ {"R_PPC_EMB_MRKREF", Const, 0, ""},
+ {"R_PPC_EMB_NADDR16", Const, 0, ""},
+ {"R_PPC_EMB_NADDR16_HA", Const, 0, ""},
+ {"R_PPC_EMB_NADDR16_HI", Const, 0, ""},
+ {"R_PPC_EMB_NADDR16_LO", Const, 0, ""},
+ {"R_PPC_EMB_NADDR32", Const, 0, ""},
+ {"R_PPC_EMB_RELSDA", Const, 0, ""},
+ {"R_PPC_EMB_RELSEC16", Const, 0, ""},
+ {"R_PPC_EMB_RELST_HA", Const, 0, ""},
+ {"R_PPC_EMB_RELST_HI", Const, 0, ""},
+ {"R_PPC_EMB_RELST_LO", Const, 0, ""},
+ {"R_PPC_EMB_SDA21", Const, 0, ""},
+ {"R_PPC_EMB_SDA2I16", Const, 0, ""},
+ {"R_PPC_EMB_SDA2REL", Const, 0, ""},
+ {"R_PPC_EMB_SDAI16", Const, 0, ""},
+ {"R_PPC_GLOB_DAT", Const, 0, ""},
+ {"R_PPC_GOT16", Const, 0, ""},
+ {"R_PPC_GOT16_HA", Const, 0, ""},
+ {"R_PPC_GOT16_HI", Const, 0, ""},
+ {"R_PPC_GOT16_LO", Const, 0, ""},
+ {"R_PPC_GOT_TLSGD16", Const, 0, ""},
+ {"R_PPC_GOT_TLSGD16_HA", Const, 0, ""},
+ {"R_PPC_GOT_TLSGD16_HI", Const, 0, ""},
+ {"R_PPC_GOT_TLSGD16_LO", Const, 0, ""},
+ {"R_PPC_GOT_TLSLD16", Const, 0, ""},
+ {"R_PPC_GOT_TLSLD16_HA", Const, 0, ""},
+ {"R_PPC_GOT_TLSLD16_HI", Const, 0, ""},
+ {"R_PPC_GOT_TLSLD16_LO", Const, 0, ""},
+ {"R_PPC_GOT_TPREL16", Const, 0, ""},
+ {"R_PPC_GOT_TPREL16_HA", Const, 0, ""},
+ {"R_PPC_GOT_TPREL16_HI", Const, 0, ""},
+ {"R_PPC_GOT_TPREL16_LO", Const, 0, ""},
+ {"R_PPC_JMP_SLOT", Const, 0, ""},
+ {"R_PPC_LOCAL24PC", Const, 0, ""},
+ {"R_PPC_NONE", Const, 0, ""},
+ {"R_PPC_PLT16_HA", Const, 0, ""},
+ {"R_PPC_PLT16_HI", Const, 0, ""},
+ {"R_PPC_PLT16_LO", Const, 0, ""},
+ {"R_PPC_PLT32", Const, 0, ""},
+ {"R_PPC_PLTREL24", Const, 0, ""},
+ {"R_PPC_PLTREL32", Const, 0, ""},
+ {"R_PPC_REL14", Const, 0, ""},
+ {"R_PPC_REL14_BRNTAKEN", Const, 0, ""},
+ {"R_PPC_REL14_BRTAKEN", Const, 0, ""},
+ {"R_PPC_REL24", Const, 0, ""},
+ {"R_PPC_REL32", Const, 0, ""},
+ {"R_PPC_RELATIVE", Const, 0, ""},
+ {"R_PPC_SDAREL16", Const, 0, ""},
+ {"R_PPC_SECTOFF", Const, 0, ""},
+ {"R_PPC_SECTOFF_HA", Const, 0, ""},
+ {"R_PPC_SECTOFF_HI", Const, 0, ""},
+ {"R_PPC_SECTOFF_LO", Const, 0, ""},
+ {"R_PPC_TLS", Const, 0, ""},
+ {"R_PPC_TPREL16", Const, 0, ""},
+ {"R_PPC_TPREL16_HA", Const, 0, ""},
+ {"R_PPC_TPREL16_HI", Const, 0, ""},
+ {"R_PPC_TPREL16_LO", Const, 0, ""},
+ {"R_PPC_TPREL32", Const, 0, ""},
+ {"R_PPC_UADDR16", Const, 0, ""},
+ {"R_PPC_UADDR32", Const, 0, ""},
+ {"R_RISCV", Type, 11, ""},
+ {"R_RISCV_32", Const, 11, ""},
+ {"R_RISCV_32_PCREL", Const, 12, ""},
+ {"R_RISCV_64", Const, 11, ""},
+ {"R_RISCV_ADD16", Const, 11, ""},
+ {"R_RISCV_ADD32", Const, 11, ""},
+ {"R_RISCV_ADD64", Const, 11, ""},
+ {"R_RISCV_ADD8", Const, 11, ""},
+ {"R_RISCV_ALIGN", Const, 11, ""},
+ {"R_RISCV_BRANCH", Const, 11, ""},
+ {"R_RISCV_CALL", Const, 11, ""},
+ {"R_RISCV_CALL_PLT", Const, 11, ""},
+ {"R_RISCV_COPY", Const, 11, ""},
+ {"R_RISCV_GNU_VTENTRY", Const, 11, ""},
+ {"R_RISCV_GNU_VTINHERIT", Const, 11, ""},
+ {"R_RISCV_GOT_HI20", Const, 11, ""},
+ {"R_RISCV_GPREL_I", Const, 11, ""},
+ {"R_RISCV_GPREL_S", Const, 11, ""},
+ {"R_RISCV_HI20", Const, 11, ""},
+ {"R_RISCV_JAL", Const, 11, ""},
+ {"R_RISCV_JUMP_SLOT", Const, 11, ""},
+ {"R_RISCV_LO12_I", Const, 11, ""},
+ {"R_RISCV_LO12_S", Const, 11, ""},
+ {"R_RISCV_NONE", Const, 11, ""},
+ {"R_RISCV_PCREL_HI20", Const, 11, ""},
+ {"R_RISCV_PCREL_LO12_I", Const, 11, ""},
+ {"R_RISCV_PCREL_LO12_S", Const, 11, ""},
+ {"R_RISCV_RELATIVE", Const, 11, ""},
+ {"R_RISCV_RELAX", Const, 11, ""},
+ {"R_RISCV_RVC_BRANCH", Const, 11, ""},
+ {"R_RISCV_RVC_JUMP", Const, 11, ""},
+ {"R_RISCV_RVC_LUI", Const, 11, ""},
+ {"R_RISCV_SET16", Const, 11, ""},
+ {"R_RISCV_SET32", Const, 11, ""},
+ {"R_RISCV_SET6", Const, 11, ""},
+ {"R_RISCV_SET8", Const, 11, ""},
+ {"R_RISCV_SUB16", Const, 11, ""},
+ {"R_RISCV_SUB32", Const, 11, ""},
+ {"R_RISCV_SUB6", Const, 11, ""},
+ {"R_RISCV_SUB64", Const, 11, ""},
+ {"R_RISCV_SUB8", Const, 11, ""},
+ {"R_RISCV_TLS_DTPMOD32", Const, 11, ""},
+ {"R_RISCV_TLS_DTPMOD64", Const, 11, ""},
+ {"R_RISCV_TLS_DTPREL32", Const, 11, ""},
+ {"R_RISCV_TLS_DTPREL64", Const, 11, ""},
+ {"R_RISCV_TLS_GD_HI20", Const, 11, ""},
+ {"R_RISCV_TLS_GOT_HI20", Const, 11, ""},
+ {"R_RISCV_TLS_TPREL32", Const, 11, ""},
+ {"R_RISCV_TLS_TPREL64", Const, 11, ""},
+ {"R_RISCV_TPREL_ADD", Const, 11, ""},
+ {"R_RISCV_TPREL_HI20", Const, 11, ""},
+ {"R_RISCV_TPREL_I", Const, 11, ""},
+ {"R_RISCV_TPREL_LO12_I", Const, 11, ""},
+ {"R_RISCV_TPREL_LO12_S", Const, 11, ""},
+ {"R_RISCV_TPREL_S", Const, 11, ""},
+ {"R_SPARC", Type, 0, ""},
+ {"R_SPARC_10", Const, 0, ""},
+ {"R_SPARC_11", Const, 0, ""},
+ {"R_SPARC_13", Const, 0, ""},
+ {"R_SPARC_16", Const, 0, ""},
+ {"R_SPARC_22", Const, 0, ""},
+ {"R_SPARC_32", Const, 0, ""},
+ {"R_SPARC_5", Const, 0, ""},
+ {"R_SPARC_6", Const, 0, ""},
+ {"R_SPARC_64", Const, 0, ""},
+ {"R_SPARC_7", Const, 0, ""},
+ {"R_SPARC_8", Const, 0, ""},
+ {"R_SPARC_COPY", Const, 0, ""},
+ {"R_SPARC_DISP16", Const, 0, ""},
+ {"R_SPARC_DISP32", Const, 0, ""},
+ {"R_SPARC_DISP64", Const, 0, ""},
+ {"R_SPARC_DISP8", Const, 0, ""},
+ {"R_SPARC_GLOB_DAT", Const, 0, ""},
+ {"R_SPARC_GLOB_JMP", Const, 0, ""},
+ {"R_SPARC_GOT10", Const, 0, ""},
+ {"R_SPARC_GOT13", Const, 0, ""},
+ {"R_SPARC_GOT22", Const, 0, ""},
+ {"R_SPARC_H44", Const, 0, ""},
+ {"R_SPARC_HH22", Const, 0, ""},
+ {"R_SPARC_HI22", Const, 0, ""},
+ {"R_SPARC_HIPLT22", Const, 0, ""},
+ {"R_SPARC_HIX22", Const, 0, ""},
+ {"R_SPARC_HM10", Const, 0, ""},
+ {"R_SPARC_JMP_SLOT", Const, 0, ""},
+ {"R_SPARC_L44", Const, 0, ""},
+ {"R_SPARC_LM22", Const, 0, ""},
+ {"R_SPARC_LO10", Const, 0, ""},
+ {"R_SPARC_LOPLT10", Const, 0, ""},
+ {"R_SPARC_LOX10", Const, 0, ""},
+ {"R_SPARC_M44", Const, 0, ""},
+ {"R_SPARC_NONE", Const, 0, ""},
+ {"R_SPARC_OLO10", Const, 0, ""},
+ {"R_SPARC_PC10", Const, 0, ""},
+ {"R_SPARC_PC22", Const, 0, ""},
+ {"R_SPARC_PCPLT10", Const, 0, ""},
+ {"R_SPARC_PCPLT22", Const, 0, ""},
+ {"R_SPARC_PCPLT32", Const, 0, ""},
+ {"R_SPARC_PC_HH22", Const, 0, ""},
+ {"R_SPARC_PC_HM10", Const, 0, ""},
+ {"R_SPARC_PC_LM22", Const, 0, ""},
+ {"R_SPARC_PLT32", Const, 0, ""},
+ {"R_SPARC_PLT64", Const, 0, ""},
+ {"R_SPARC_REGISTER", Const, 0, ""},
+ {"R_SPARC_RELATIVE", Const, 0, ""},
+ {"R_SPARC_UA16", Const, 0, ""},
+ {"R_SPARC_UA32", Const, 0, ""},
+ {"R_SPARC_UA64", Const, 0, ""},
+ {"R_SPARC_WDISP16", Const, 0, ""},
+ {"R_SPARC_WDISP19", Const, 0, ""},
+ {"R_SPARC_WDISP22", Const, 0, ""},
+ {"R_SPARC_WDISP30", Const, 0, ""},
+ {"R_SPARC_WPLT30", Const, 0, ""},
+ {"R_SYM32", Func, 0, "func(info uint32) uint32"},
+ {"R_SYM64", Func, 0, "func(info uint64) uint32"},
+ {"R_TYPE32", Func, 0, "func(info uint32) uint32"},
+ {"R_TYPE64", Func, 0, "func(info uint64) uint32"},
+ {"R_X86_64", Type, 0, ""},
+ {"R_X86_64_16", Const, 0, ""},
+ {"R_X86_64_32", Const, 0, ""},
+ {"R_X86_64_32S", Const, 0, ""},
+ {"R_X86_64_64", Const, 0, ""},
+ {"R_X86_64_8", Const, 0, ""},
+ {"R_X86_64_COPY", Const, 0, ""},
+ {"R_X86_64_DTPMOD64", Const, 0, ""},
+ {"R_X86_64_DTPOFF32", Const, 0, ""},
+ {"R_X86_64_DTPOFF64", Const, 0, ""},
+ {"R_X86_64_GLOB_DAT", Const, 0, ""},
+ {"R_X86_64_GOT32", Const, 0, ""},
+ {"R_X86_64_GOT64", Const, 10, ""},
+ {"R_X86_64_GOTOFF64", Const, 10, ""},
+ {"R_X86_64_GOTPC32", Const, 10, ""},
+ {"R_X86_64_GOTPC32_TLSDESC", Const, 10, ""},
+ {"R_X86_64_GOTPC64", Const, 10, ""},
+ {"R_X86_64_GOTPCREL", Const, 0, ""},
+ {"R_X86_64_GOTPCREL64", Const, 10, ""},
+ {"R_X86_64_GOTPCRELX", Const, 10, ""},
+ {"R_X86_64_GOTPLT64", Const, 10, ""},
+ {"R_X86_64_GOTTPOFF", Const, 0, ""},
+ {"R_X86_64_IRELATIVE", Const, 10, ""},
+ {"R_X86_64_JMP_SLOT", Const, 0, ""},
+ {"R_X86_64_NONE", Const, 0, ""},
+ {"R_X86_64_PC16", Const, 0, ""},
+ {"R_X86_64_PC32", Const, 0, ""},
+ {"R_X86_64_PC32_BND", Const, 10, ""},
+ {"R_X86_64_PC64", Const, 10, ""},
+ {"R_X86_64_PC8", Const, 0, ""},
+ {"R_X86_64_PLT32", Const, 0, ""},
+ {"R_X86_64_PLT32_BND", Const, 10, ""},
+ {"R_X86_64_PLTOFF64", Const, 10, ""},
+ {"R_X86_64_RELATIVE", Const, 0, ""},
+ {"R_X86_64_RELATIVE64", Const, 10, ""},
+ {"R_X86_64_REX_GOTPCRELX", Const, 10, ""},
+ {"R_X86_64_SIZE32", Const, 10, ""},
+ {"R_X86_64_SIZE64", Const, 10, ""},
+ {"R_X86_64_TLSDESC", Const, 10, ""},
+ {"R_X86_64_TLSDESC_CALL", Const, 10, ""},
+ {"R_X86_64_TLSGD", Const, 0, ""},
+ {"R_X86_64_TLSLD", Const, 0, ""},
+ {"R_X86_64_TPOFF32", Const, 0, ""},
+ {"R_X86_64_TPOFF64", Const, 0, ""},
+ {"Rel32", Type, 0, ""},
+ {"Rel32.Info", Field, 0, ""},
+ {"Rel32.Off", Field, 0, ""},
+ {"Rel64", Type, 0, ""},
+ {"Rel64.Info", Field, 0, ""},
+ {"Rel64.Off", Field, 0, ""},
+ {"Rela32", Type, 0, ""},
+ {"Rela32.Addend", Field, 0, ""},
+ {"Rela32.Info", Field, 0, ""},
+ {"Rela32.Off", Field, 0, ""},
+ {"Rela64", Type, 0, ""},
+ {"Rela64.Addend", Field, 0, ""},
+ {"Rela64.Info", Field, 0, ""},
+ {"Rela64.Off", Field, 0, ""},
+ {"SHF_ALLOC", Const, 0, ""},
+ {"SHF_COMPRESSED", Const, 6, ""},
+ {"SHF_EXECINSTR", Const, 0, ""},
+ {"SHF_GROUP", Const, 0, ""},
+ {"SHF_INFO_LINK", Const, 0, ""},
+ {"SHF_LINK_ORDER", Const, 0, ""},
+ {"SHF_MASKOS", Const, 0, ""},
+ {"SHF_MASKPROC", Const, 0, ""},
+ {"SHF_MERGE", Const, 0, ""},
+ {"SHF_OS_NONCONFORMING", Const, 0, ""},
+ {"SHF_STRINGS", Const, 0, ""},
+ {"SHF_TLS", Const, 0, ""},
+ {"SHF_WRITE", Const, 0, ""},
+ {"SHN_ABS", Const, 0, ""},
+ {"SHN_COMMON", Const, 0, ""},
+ {"SHN_HIOS", Const, 0, ""},
+ {"SHN_HIPROC", Const, 0, ""},
+ {"SHN_HIRESERVE", Const, 0, ""},
+ {"SHN_LOOS", Const, 0, ""},
+ {"SHN_LOPROC", Const, 0, ""},
+ {"SHN_LORESERVE", Const, 0, ""},
+ {"SHN_UNDEF", Const, 0, ""},
+ {"SHN_XINDEX", Const, 0, ""},
+ {"SHT_DYNAMIC", Const, 0, ""},
+ {"SHT_DYNSYM", Const, 0, ""},
+ {"SHT_FINI_ARRAY", Const, 0, ""},
+ {"SHT_GNU_ATTRIBUTES", Const, 0, ""},
+ {"SHT_GNU_HASH", Const, 0, ""},
+ {"SHT_GNU_LIBLIST", Const, 0, ""},
+ {"SHT_GNU_VERDEF", Const, 0, ""},
+ {"SHT_GNU_VERNEED", Const, 0, ""},
+ {"SHT_GNU_VERSYM", Const, 0, ""},
+ {"SHT_GROUP", Const, 0, ""},
+ {"SHT_HASH", Const, 0, ""},
+ {"SHT_HIOS", Const, 0, ""},
+ {"SHT_HIPROC", Const, 0, ""},
+ {"SHT_HIUSER", Const, 0, ""},
+ {"SHT_INIT_ARRAY", Const, 0, ""},
+ {"SHT_LOOS", Const, 0, ""},
+ {"SHT_LOPROC", Const, 0, ""},
+ {"SHT_LOUSER", Const, 0, ""},
+ {"SHT_MIPS_ABIFLAGS", Const, 17, ""},
+ {"SHT_NOBITS", Const, 0, ""},
+ {"SHT_NOTE", Const, 0, ""},
+ {"SHT_NULL", Const, 0, ""},
+ {"SHT_PREINIT_ARRAY", Const, 0, ""},
+ {"SHT_PROGBITS", Const, 0, ""},
+ {"SHT_REL", Const, 0, ""},
+ {"SHT_RELA", Const, 0, ""},
+ {"SHT_RISCV_ATTRIBUTES", Const, 25, ""},
+ {"SHT_SHLIB", Const, 0, ""},
+ {"SHT_STRTAB", Const, 0, ""},
+ {"SHT_SYMTAB", Const, 0, ""},
+ {"SHT_SYMTAB_SHNDX", Const, 0, ""},
+ {"STB_GLOBAL", Const, 0, ""},
+ {"STB_HIOS", Const, 0, ""},
+ {"STB_HIPROC", Const, 0, ""},
+ {"STB_LOCAL", Const, 0, ""},
+ {"STB_LOOS", Const, 0, ""},
+ {"STB_LOPROC", Const, 0, ""},
+ {"STB_WEAK", Const, 0, ""},
+ {"STT_COMMON", Const, 0, ""},
+ {"STT_FILE", Const, 0, ""},
+ {"STT_FUNC", Const, 0, ""},
+ {"STT_GNU_IFUNC", Const, 23, ""},
+ {"STT_HIOS", Const, 0, ""},
+ {"STT_HIPROC", Const, 0, ""},
+ {"STT_LOOS", Const, 0, ""},
+ {"STT_LOPROC", Const, 0, ""},
+ {"STT_NOTYPE", Const, 0, ""},
+ {"STT_OBJECT", Const, 0, ""},
+ {"STT_RELC", Const, 23, ""},
+ {"STT_SECTION", Const, 0, ""},
+ {"STT_SRELC", Const, 23, ""},
+ {"STT_TLS", Const, 0, ""},
+ {"STV_DEFAULT", Const, 0, ""},
+ {"STV_HIDDEN", Const, 0, ""},
+ {"STV_INTERNAL", Const, 0, ""},
+ {"STV_PROTECTED", Const, 0, ""},
+ {"ST_BIND", Func, 0, "func(info uint8) SymBind"},
+ {"ST_INFO", Func, 0, "func(bind SymBind, typ SymType) uint8"},
+ {"ST_TYPE", Func, 0, "func(info uint8) SymType"},
+ {"ST_VISIBILITY", Func, 0, "func(other uint8) SymVis"},
+ {"Section", Type, 0, ""},
+ {"Section.ReaderAt", Field, 0, ""},
+ {"Section.SectionHeader", Field, 0, ""},
+ {"Section32", Type, 0, ""},
+ {"Section32.Addr", Field, 0, ""},
+ {"Section32.Addralign", Field, 0, ""},
+ {"Section32.Entsize", Field, 0, ""},
+ {"Section32.Flags", Field, 0, ""},
+ {"Section32.Info", Field, 0, ""},
+ {"Section32.Link", Field, 0, ""},
+ {"Section32.Name", Field, 0, ""},
+ {"Section32.Off", Field, 0, ""},
+ {"Section32.Size", Field, 0, ""},
+ {"Section32.Type", Field, 0, ""},
+ {"Section64", Type, 0, ""},
+ {"Section64.Addr", Field, 0, ""},
+ {"Section64.Addralign", Field, 0, ""},
+ {"Section64.Entsize", Field, 0, ""},
+ {"Section64.Flags", Field, 0, ""},
+ {"Section64.Info", Field, 0, ""},
+ {"Section64.Link", Field, 0, ""},
+ {"Section64.Name", Field, 0, ""},
+ {"Section64.Off", Field, 0, ""},
+ {"Section64.Size", Field, 0, ""},
+ {"Section64.Type", Field, 0, ""},
+ {"SectionFlag", Type, 0, ""},
+ {"SectionHeader", Type, 0, ""},
+ {"SectionHeader.Addr", Field, 0, ""},
+ {"SectionHeader.Addralign", Field, 0, ""},
+ {"SectionHeader.Entsize", Field, 0, ""},
+ {"SectionHeader.FileSize", Field, 6, ""},
+ {"SectionHeader.Flags", Field, 0, ""},
+ {"SectionHeader.Info", Field, 0, ""},
+ {"SectionHeader.Link", Field, 0, ""},
+ {"SectionHeader.Name", Field, 0, ""},
+ {"SectionHeader.Offset", Field, 0, ""},
+ {"SectionHeader.Size", Field, 0, ""},
+ {"SectionHeader.Type", Field, 0, ""},
+ {"SectionIndex", Type, 0, ""},
+ {"SectionType", Type, 0, ""},
+ {"Sym32", Type, 0, ""},
+ {"Sym32.Info", Field, 0, ""},
+ {"Sym32.Name", Field, 0, ""},
+ {"Sym32.Other", Field, 0, ""},
+ {"Sym32.Shndx", Field, 0, ""},
+ {"Sym32.Size", Field, 0, ""},
+ {"Sym32.Value", Field, 0, ""},
+ {"Sym32Size", Const, 0, ""},
+ {"Sym64", Type, 0, ""},
+ {"Sym64.Info", Field, 0, ""},
+ {"Sym64.Name", Field, 0, ""},
+ {"Sym64.Other", Field, 0, ""},
+ {"Sym64.Shndx", Field, 0, ""},
+ {"Sym64.Size", Field, 0, ""},
+ {"Sym64.Value", Field, 0, ""},
+ {"Sym64Size", Const, 0, ""},
+ {"SymBind", Type, 0, ""},
+ {"SymType", Type, 0, ""},
+ {"SymVis", Type, 0, ""},
+ {"Symbol", Type, 0, ""},
+ {"Symbol.HasVersion", Field, 24, ""},
+ {"Symbol.Info", Field, 0, ""},
+ {"Symbol.Library", Field, 13, ""},
+ {"Symbol.Name", Field, 0, ""},
+ {"Symbol.Other", Field, 0, ""},
+ {"Symbol.Section", Field, 0, ""},
+ {"Symbol.Size", Field, 0, ""},
+ {"Symbol.Value", Field, 0, ""},
+ {"Symbol.Version", Field, 13, ""},
+ {"Symbol.VersionIndex", Field, 24, ""},
+ {"Type", Type, 0, ""},
+ {"VER_FLG_BASE", Const, 24, ""},
+ {"VER_FLG_INFO", Const, 24, ""},
+ {"VER_FLG_WEAK", Const, 24, ""},
+ {"Version", Type, 0, ""},
+ {"VersionIndex", Type, 24, ""},
+ },
+ "debug/gosym": {
+ {"(*DecodingError).Error", Method, 0, ""},
+ {"(*LineTable).LineToPC", Method, 0, ""},
+ {"(*LineTable).PCToLine", Method, 0, ""},
+ {"(*Sym).BaseName", Method, 0, ""},
+ {"(*Sym).PackageName", Method, 0, ""},
+ {"(*Sym).ReceiverName", Method, 0, ""},
+ {"(*Sym).Static", Method, 0, ""},
+ {"(*Table).LineToPC", Method, 0, ""},
+ {"(*Table).LookupFunc", Method, 0, ""},
+ {"(*Table).LookupSym", Method, 0, ""},
+ {"(*Table).PCToFunc", Method, 0, ""},
+ {"(*Table).PCToLine", Method, 0, ""},
+ {"(*Table).SymByAddr", Method, 0, ""},
+ {"(*UnknownLineError).Error", Method, 0, ""},
+ {"(Func).BaseName", Method, 0, ""},
+ {"(Func).PackageName", Method, 0, ""},
+ {"(Func).ReceiverName", Method, 0, ""},
+ {"(Func).Static", Method, 0, ""},
+ {"(UnknownFileError).Error", Method, 0, ""},
+ {"DecodingError", Type, 0, ""},
+ {"Func", Type, 0, ""},
+ {"Func.End", Field, 0, ""},
+ {"Func.Entry", Field, 0, ""},
+ {"Func.FrameSize", Field, 0, ""},
+ {"Func.LineTable", Field, 0, ""},
+ {"Func.Locals", Field, 0, ""},
+ {"Func.Obj", Field, 0, ""},
+ {"Func.Params", Field, 0, ""},
+ {"Func.Sym", Field, 0, ""},
+ {"LineTable", Type, 0, ""},
+ {"LineTable.Data", Field, 0, ""},
+ {"LineTable.Line", Field, 0, ""},
+ {"LineTable.PC", Field, 0, ""},
+ {"NewLineTable", Func, 0, "func(data []byte, text uint64) *LineTable"},
+ {"NewTable", Func, 0, "func(symtab []byte, pcln *LineTable) (*Table, error)"},
+ {"Obj", Type, 0, ""},
+ {"Obj.Funcs", Field, 0, ""},
+ {"Obj.Paths", Field, 0, ""},
+ {"Sym", Type, 0, ""},
+ {"Sym.Func", Field, 0, ""},
+ {"Sym.GoType", Field, 0, ""},
+ {"Sym.Name", Field, 0, ""},
+ {"Sym.Type", Field, 0, ""},
+ {"Sym.Value", Field, 0, ""},
+ {"Table", Type, 0, ""},
+ {"Table.Files", Field, 0, ""},
+ {"Table.Funcs", Field, 0, ""},
+ {"Table.Objs", Field, 0, ""},
+ {"Table.Syms", Field, 0, ""},
+ {"UnknownFileError", Type, 0, ""},
+ {"UnknownLineError", Type, 0, ""},
+ {"UnknownLineError.File", Field, 0, ""},
+ {"UnknownLineError.Line", Field, 0, ""},
+ },
+ "debug/macho": {
+ {"(*FatFile).Close", Method, 3, ""},
+ {"(*File).Close", Method, 0, ""},
+ {"(*File).DWARF", Method, 0, ""},
+ {"(*File).ImportedLibraries", Method, 0, ""},
+ {"(*File).ImportedSymbols", Method, 0, ""},
+ {"(*File).Section", Method, 0, ""},
+ {"(*File).Segment", Method, 0, ""},
+ {"(*FormatError).Error", Method, 0, ""},
+ {"(*Section).Data", Method, 0, ""},
+ {"(*Section).Open", Method, 0, ""},
+ {"(*Segment).Data", Method, 0, ""},
+ {"(*Segment).Open", Method, 0, ""},
+ {"(Cpu).GoString", Method, 0, ""},
+ {"(Cpu).String", Method, 0, ""},
+ {"(Dylib).Raw", Method, 0, ""},
+ {"(Dysymtab).Raw", Method, 0, ""},
+ {"(FatArch).Close", Method, 3, ""},
+ {"(FatArch).DWARF", Method, 3, ""},
+ {"(FatArch).ImportedLibraries", Method, 3, ""},
+ {"(FatArch).ImportedSymbols", Method, 3, ""},
+ {"(FatArch).Section", Method, 3, ""},
+ {"(FatArch).Segment", Method, 3, ""},
+ {"(LoadBytes).Raw", Method, 0, ""},
+ {"(LoadCmd).GoString", Method, 0, ""},
+ {"(LoadCmd).String", Method, 0, ""},
+ {"(RelocTypeARM).GoString", Method, 10, ""},
+ {"(RelocTypeARM).String", Method, 10, ""},
+ {"(RelocTypeARM64).GoString", Method, 10, ""},
+ {"(RelocTypeARM64).String", Method, 10, ""},
+ {"(RelocTypeGeneric).GoString", Method, 10, ""},
+ {"(RelocTypeGeneric).String", Method, 10, ""},
+ {"(RelocTypeX86_64).GoString", Method, 10, ""},
+ {"(RelocTypeX86_64).String", Method, 10, ""},
+ {"(Rpath).Raw", Method, 10, ""},
+ {"(Section).ReadAt", Method, 0, ""},
+ {"(Segment).Raw", Method, 0, ""},
+ {"(Segment).ReadAt", Method, 0, ""},
+ {"(Symtab).Raw", Method, 0, ""},
+ {"(Type).GoString", Method, 10, ""},
+ {"(Type).String", Method, 10, ""},
+ {"ARM64_RELOC_ADDEND", Const, 10, ""},
+ {"ARM64_RELOC_BRANCH26", Const, 10, ""},
+ {"ARM64_RELOC_GOT_LOAD_PAGE21", Const, 10, ""},
+ {"ARM64_RELOC_GOT_LOAD_PAGEOFF12", Const, 10, ""},
+ {"ARM64_RELOC_PAGE21", Const, 10, ""},
+ {"ARM64_RELOC_PAGEOFF12", Const, 10, ""},
+ {"ARM64_RELOC_POINTER_TO_GOT", Const, 10, ""},
+ {"ARM64_RELOC_SUBTRACTOR", Const, 10, ""},
+ {"ARM64_RELOC_TLVP_LOAD_PAGE21", Const, 10, ""},
+ {"ARM64_RELOC_TLVP_LOAD_PAGEOFF12", Const, 10, ""},
+ {"ARM64_RELOC_UNSIGNED", Const, 10, ""},
+ {"ARM_RELOC_BR24", Const, 10, ""},
+ {"ARM_RELOC_HALF", Const, 10, ""},
+ {"ARM_RELOC_HALF_SECTDIFF", Const, 10, ""},
+ {"ARM_RELOC_LOCAL_SECTDIFF", Const, 10, ""},
+ {"ARM_RELOC_PAIR", Const, 10, ""},
+ {"ARM_RELOC_PB_LA_PTR", Const, 10, ""},
+ {"ARM_RELOC_SECTDIFF", Const, 10, ""},
+ {"ARM_RELOC_VANILLA", Const, 10, ""},
+ {"ARM_THUMB_32BIT_BRANCH", Const, 10, ""},
+ {"ARM_THUMB_RELOC_BR22", Const, 10, ""},
+ {"Cpu", Type, 0, ""},
+ {"Cpu386", Const, 0, ""},
+ {"CpuAmd64", Const, 0, ""},
+ {"CpuArm", Const, 3, ""},
+ {"CpuArm64", Const, 11, ""},
+ {"CpuPpc", Const, 3, ""},
+ {"CpuPpc64", Const, 3, ""},
+ {"Dylib", Type, 0, ""},
+ {"Dylib.CompatVersion", Field, 0, ""},
+ {"Dylib.CurrentVersion", Field, 0, ""},
+ {"Dylib.LoadBytes", Field, 0, ""},
+ {"Dylib.Name", Field, 0, ""},
+ {"Dylib.Time", Field, 0, ""},
+ {"DylibCmd", Type, 0, ""},
+ {"DylibCmd.Cmd", Field, 0, ""},
+ {"DylibCmd.CompatVersion", Field, 0, ""},
+ {"DylibCmd.CurrentVersion", Field, 0, ""},
+ {"DylibCmd.Len", Field, 0, ""},
+ {"DylibCmd.Name", Field, 0, ""},
+ {"DylibCmd.Time", Field, 0, ""},
+ {"Dysymtab", Type, 0, ""},
+ {"Dysymtab.DysymtabCmd", Field, 0, ""},
+ {"Dysymtab.IndirectSyms", Field, 0, ""},
+ {"Dysymtab.LoadBytes", Field, 0, ""},
+ {"DysymtabCmd", Type, 0, ""},
+ {"DysymtabCmd.Cmd", Field, 0, ""},
+ {"DysymtabCmd.Extrefsymoff", Field, 0, ""},
+ {"DysymtabCmd.Extreloff", Field, 0, ""},
+ {"DysymtabCmd.Iextdefsym", Field, 0, ""},
+ {"DysymtabCmd.Ilocalsym", Field, 0, ""},
+ {"DysymtabCmd.Indirectsymoff", Field, 0, ""},
+ {"DysymtabCmd.Iundefsym", Field, 0, ""},
+ {"DysymtabCmd.Len", Field, 0, ""},
+ {"DysymtabCmd.Locreloff", Field, 0, ""},
+ {"DysymtabCmd.Modtaboff", Field, 0, ""},
+ {"DysymtabCmd.Nextdefsym", Field, 0, ""},
+ {"DysymtabCmd.Nextrefsyms", Field, 0, ""},
+ {"DysymtabCmd.Nextrel", Field, 0, ""},
+ {"DysymtabCmd.Nindirectsyms", Field, 0, ""},
+ {"DysymtabCmd.Nlocalsym", Field, 0, ""},
+ {"DysymtabCmd.Nlocrel", Field, 0, ""},
+ {"DysymtabCmd.Nmodtab", Field, 0, ""},
+ {"DysymtabCmd.Ntoc", Field, 0, ""},
+ {"DysymtabCmd.Nundefsym", Field, 0, ""},
+ {"DysymtabCmd.Tocoffset", Field, 0, ""},
+ {"ErrNotFat", Var, 3, ""},
+ {"FatArch", Type, 3, ""},
+ {"FatArch.FatArchHeader", Field, 3, ""},
+ {"FatArch.File", Field, 3, ""},
+ {"FatArchHeader", Type, 3, ""},
+ {"FatArchHeader.Align", Field, 3, ""},
+ {"FatArchHeader.Cpu", Field, 3, ""},
+ {"FatArchHeader.Offset", Field, 3, ""},
+ {"FatArchHeader.Size", Field, 3, ""},
+ {"FatArchHeader.SubCpu", Field, 3, ""},
+ {"FatFile", Type, 3, ""},
+ {"FatFile.Arches", Field, 3, ""},
+ {"FatFile.Magic", Field, 3, ""},
+ {"File", Type, 0, ""},
+ {"File.ByteOrder", Field, 0, ""},
+ {"File.Dysymtab", Field, 0, ""},
+ {"File.FileHeader", Field, 0, ""},
+ {"File.Loads", Field, 0, ""},
+ {"File.Sections", Field, 0, ""},
+ {"File.Symtab", Field, 0, ""},
+ {"FileHeader", Type, 0, ""},
+ {"FileHeader.Cmdsz", Field, 0, ""},
+ {"FileHeader.Cpu", Field, 0, ""},
+ {"FileHeader.Flags", Field, 0, ""},
+ {"FileHeader.Magic", Field, 0, ""},
+ {"FileHeader.Ncmd", Field, 0, ""},
+ {"FileHeader.SubCpu", Field, 0, ""},
+ {"FileHeader.Type", Field, 0, ""},
+ {"FlagAllModsBound", Const, 10, ""},
+ {"FlagAllowStackExecution", Const, 10, ""},
+ {"FlagAppExtensionSafe", Const, 10, ""},
+ {"FlagBindAtLoad", Const, 10, ""},
+ {"FlagBindsToWeak", Const, 10, ""},
+ {"FlagCanonical", Const, 10, ""},
+ {"FlagDeadStrippableDylib", Const, 10, ""},
+ {"FlagDyldLink", Const, 10, ""},
+ {"FlagForceFlat", Const, 10, ""},
+ {"FlagHasTLVDescriptors", Const, 10, ""},
+ {"FlagIncrLink", Const, 10, ""},
+ {"FlagLazyInit", Const, 10, ""},
+ {"FlagNoFixPrebinding", Const, 10, ""},
+ {"FlagNoHeapExecution", Const, 10, ""},
+ {"FlagNoMultiDefs", Const, 10, ""},
+ {"FlagNoReexportedDylibs", Const, 10, ""},
+ {"FlagNoUndefs", Const, 10, ""},
+ {"FlagPIE", Const, 10, ""},
+ {"FlagPrebindable", Const, 10, ""},
+ {"FlagPrebound", Const, 10, ""},
+ {"FlagRootSafe", Const, 10, ""},
+ {"FlagSetuidSafe", Const, 10, ""},
+ {"FlagSplitSegs", Const, 10, ""},
+ {"FlagSubsectionsViaSymbols", Const, 10, ""},
+ {"FlagTwoLevel", Const, 10, ""},
+ {"FlagWeakDefines", Const, 10, ""},
+ {"FormatError", Type, 0, ""},
+ {"GENERIC_RELOC_LOCAL_SECTDIFF", Const, 10, ""},
+ {"GENERIC_RELOC_PAIR", Const, 10, ""},
+ {"GENERIC_RELOC_PB_LA_PTR", Const, 10, ""},
+ {"GENERIC_RELOC_SECTDIFF", Const, 10, ""},
+ {"GENERIC_RELOC_TLV", Const, 10, ""},
+ {"GENERIC_RELOC_VANILLA", Const, 10, ""},
+ {"Load", Type, 0, ""},
+ {"LoadBytes", Type, 0, ""},
+ {"LoadCmd", Type, 0, ""},
+ {"LoadCmdDylib", Const, 0, ""},
+ {"LoadCmdDylinker", Const, 0, ""},
+ {"LoadCmdDysymtab", Const, 0, ""},
+ {"LoadCmdRpath", Const, 10, ""},
+ {"LoadCmdSegment", Const, 0, ""},
+ {"LoadCmdSegment64", Const, 0, ""},
+ {"LoadCmdSymtab", Const, 0, ""},
+ {"LoadCmdThread", Const, 0, ""},
+ {"LoadCmdUnixThread", Const, 0, ""},
+ {"Magic32", Const, 0, ""},
+ {"Magic64", Const, 0, ""},
+ {"MagicFat", Const, 3, ""},
+ {"NewFatFile", Func, 3, "func(r io.ReaderAt) (*FatFile, error)"},
+ {"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
+ {"Nlist32", Type, 0, ""},
+ {"Nlist32.Desc", Field, 0, ""},
+ {"Nlist32.Name", Field, 0, ""},
+ {"Nlist32.Sect", Field, 0, ""},
+ {"Nlist32.Type", Field, 0, ""},
+ {"Nlist32.Value", Field, 0, ""},
+ {"Nlist64", Type, 0, ""},
+ {"Nlist64.Desc", Field, 0, ""},
+ {"Nlist64.Name", Field, 0, ""},
+ {"Nlist64.Sect", Field, 0, ""},
+ {"Nlist64.Type", Field, 0, ""},
+ {"Nlist64.Value", Field, 0, ""},
+ {"Open", Func, 0, "func(name string) (*File, error)"},
+ {"OpenFat", Func, 3, "func(name string) (*FatFile, error)"},
+ {"Regs386", Type, 0, ""},
+ {"Regs386.AX", Field, 0, ""},
+ {"Regs386.BP", Field, 0, ""},
+ {"Regs386.BX", Field, 0, ""},
+ {"Regs386.CS", Field, 0, ""},
+ {"Regs386.CX", Field, 0, ""},
+ {"Regs386.DI", Field, 0, ""},
+ {"Regs386.DS", Field, 0, ""},
+ {"Regs386.DX", Field, 0, ""},
+ {"Regs386.ES", Field, 0, ""},
+ {"Regs386.FLAGS", Field, 0, ""},
+ {"Regs386.FS", Field, 0, ""},
+ {"Regs386.GS", Field, 0, ""},
+ {"Regs386.IP", Field, 0, ""},
+ {"Regs386.SI", Field, 0, ""},
+ {"Regs386.SP", Field, 0, ""},
+ {"Regs386.SS", Field, 0, ""},
+ {"RegsAMD64", Type, 0, ""},
+ {"RegsAMD64.AX", Field, 0, ""},
+ {"RegsAMD64.BP", Field, 0, ""},
+ {"RegsAMD64.BX", Field, 0, ""},
+ {"RegsAMD64.CS", Field, 0, ""},
+ {"RegsAMD64.CX", Field, 0, ""},
+ {"RegsAMD64.DI", Field, 0, ""},
+ {"RegsAMD64.DX", Field, 0, ""},
+ {"RegsAMD64.FLAGS", Field, 0, ""},
+ {"RegsAMD64.FS", Field, 0, ""},
+ {"RegsAMD64.GS", Field, 0, ""},
+ {"RegsAMD64.IP", Field, 0, ""},
+ {"RegsAMD64.R10", Field, 0, ""},
+ {"RegsAMD64.R11", Field, 0, ""},
+ {"RegsAMD64.R12", Field, 0, ""},
+ {"RegsAMD64.R13", Field, 0, ""},
+ {"RegsAMD64.R14", Field, 0, ""},
+ {"RegsAMD64.R15", Field, 0, ""},
+ {"RegsAMD64.R8", Field, 0, ""},
+ {"RegsAMD64.R9", Field, 0, ""},
+ {"RegsAMD64.SI", Field, 0, ""},
+ {"RegsAMD64.SP", Field, 0, ""},
+ {"Reloc", Type, 10, ""},
+ {"Reloc.Addr", Field, 10, ""},
+ {"Reloc.Extern", Field, 10, ""},
+ {"Reloc.Len", Field, 10, ""},
+ {"Reloc.Pcrel", Field, 10, ""},
+ {"Reloc.Scattered", Field, 10, ""},
+ {"Reloc.Type", Field, 10, ""},
+ {"Reloc.Value", Field, 10, ""},
+ {"RelocTypeARM", Type, 10, ""},
+ {"RelocTypeARM64", Type, 10, ""},
+ {"RelocTypeGeneric", Type, 10, ""},
+ {"RelocTypeX86_64", Type, 10, ""},
+ {"Rpath", Type, 10, ""},
+ {"Rpath.LoadBytes", Field, 10, ""},
+ {"Rpath.Path", Field, 10, ""},
+ {"RpathCmd", Type, 10, ""},
+ {"RpathCmd.Cmd", Field, 10, ""},
+ {"RpathCmd.Len", Field, 10, ""},
+ {"RpathCmd.Path", Field, 10, ""},
+ {"Section", Type, 0, ""},
+ {"Section.ReaderAt", Field, 0, ""},
+ {"Section.Relocs", Field, 10, ""},
+ {"Section.SectionHeader", Field, 0, ""},
+ {"Section32", Type, 0, ""},
+ {"Section32.Addr", Field, 0, ""},
+ {"Section32.Align", Field, 0, ""},
+ {"Section32.Flags", Field, 0, ""},
+ {"Section32.Name", Field, 0, ""},
+ {"Section32.Nreloc", Field, 0, ""},
+ {"Section32.Offset", Field, 0, ""},
+ {"Section32.Reloff", Field, 0, ""},
+ {"Section32.Reserve1", Field, 0, ""},
+ {"Section32.Reserve2", Field, 0, ""},
+ {"Section32.Seg", Field, 0, ""},
+ {"Section32.Size", Field, 0, ""},
+ {"Section64", Type, 0, ""},
+ {"Section64.Addr", Field, 0, ""},
+ {"Section64.Align", Field, 0, ""},
+ {"Section64.Flags", Field, 0, ""},
+ {"Section64.Name", Field, 0, ""},
+ {"Section64.Nreloc", Field, 0, ""},
+ {"Section64.Offset", Field, 0, ""},
+ {"Section64.Reloff", Field, 0, ""},
+ {"Section64.Reserve1", Field, 0, ""},
+ {"Section64.Reserve2", Field, 0, ""},
+ {"Section64.Reserve3", Field, 0, ""},
+ {"Section64.Seg", Field, 0, ""},
+ {"Section64.Size", Field, 0, ""},
+ {"SectionHeader", Type, 0, ""},
+ {"SectionHeader.Addr", Field, 0, ""},
+ {"SectionHeader.Align", Field, 0, ""},
+ {"SectionHeader.Flags", Field, 0, ""},
+ {"SectionHeader.Name", Field, 0, ""},
+ {"SectionHeader.Nreloc", Field, 0, ""},
+ {"SectionHeader.Offset", Field, 0, ""},
+ {"SectionHeader.Reloff", Field, 0, ""},
+ {"SectionHeader.Seg", Field, 0, ""},
+ {"SectionHeader.Size", Field, 0, ""},
+ {"Segment", Type, 0, ""},
+ {"Segment.LoadBytes", Field, 0, ""},
+ {"Segment.ReaderAt", Field, 0, ""},
+ {"Segment.SegmentHeader", Field, 0, ""},
+ {"Segment32", Type, 0, ""},
+ {"Segment32.Addr", Field, 0, ""},
+ {"Segment32.Cmd", Field, 0, ""},
+ {"Segment32.Filesz", Field, 0, ""},
+ {"Segment32.Flag", Field, 0, ""},
+ {"Segment32.Len", Field, 0, ""},
+ {"Segment32.Maxprot", Field, 0, ""},
+ {"Segment32.Memsz", Field, 0, ""},
+ {"Segment32.Name", Field, 0, ""},
+ {"Segment32.Nsect", Field, 0, ""},
+ {"Segment32.Offset", Field, 0, ""},
+ {"Segment32.Prot", Field, 0, ""},
+ {"Segment64", Type, 0, ""},
+ {"Segment64.Addr", Field, 0, ""},
+ {"Segment64.Cmd", Field, 0, ""},
+ {"Segment64.Filesz", Field, 0, ""},
+ {"Segment64.Flag", Field, 0, ""},
+ {"Segment64.Len", Field, 0, ""},
+ {"Segment64.Maxprot", Field, 0, ""},
+ {"Segment64.Memsz", Field, 0, ""},
+ {"Segment64.Name", Field, 0, ""},
+ {"Segment64.Nsect", Field, 0, ""},
+ {"Segment64.Offset", Field, 0, ""},
+ {"Segment64.Prot", Field, 0, ""},
+ {"SegmentHeader", Type, 0, ""},
+ {"SegmentHeader.Addr", Field, 0, ""},
+ {"SegmentHeader.Cmd", Field, 0, ""},
+ {"SegmentHeader.Filesz", Field, 0, ""},
+ {"SegmentHeader.Flag", Field, 0, ""},
+ {"SegmentHeader.Len", Field, 0, ""},
+ {"SegmentHeader.Maxprot", Field, 0, ""},
+ {"SegmentHeader.Memsz", Field, 0, ""},
+ {"SegmentHeader.Name", Field, 0, ""},
+ {"SegmentHeader.Nsect", Field, 0, ""},
+ {"SegmentHeader.Offset", Field, 0, ""},
+ {"SegmentHeader.Prot", Field, 0, ""},
+ {"Symbol", Type, 0, ""},
+ {"Symbol.Desc", Field, 0, ""},
+ {"Symbol.Name", Field, 0, ""},
+ {"Symbol.Sect", Field, 0, ""},
+ {"Symbol.Type", Field, 0, ""},
+ {"Symbol.Value", Field, 0, ""},
+ {"Symtab", Type, 0, ""},
+ {"Symtab.LoadBytes", Field, 0, ""},
+ {"Symtab.Syms", Field, 0, ""},
+ {"Symtab.SymtabCmd", Field, 0, ""},
+ {"SymtabCmd", Type, 0, ""},
+ {"SymtabCmd.Cmd", Field, 0, ""},
+ {"SymtabCmd.Len", Field, 0, ""},
+ {"SymtabCmd.Nsyms", Field, 0, ""},
+ {"SymtabCmd.Stroff", Field, 0, ""},
+ {"SymtabCmd.Strsize", Field, 0, ""},
+ {"SymtabCmd.Symoff", Field, 0, ""},
+ {"Thread", Type, 0, ""},
+ {"Thread.Cmd", Field, 0, ""},
+ {"Thread.Data", Field, 0, ""},
+ {"Thread.Len", Field, 0, ""},
+ {"Thread.Type", Field, 0, ""},
+ {"Type", Type, 0, ""},
+ {"TypeBundle", Const, 3, ""},
+ {"TypeDylib", Const, 3, ""},
+ {"TypeExec", Const, 0, ""},
+ {"TypeObj", Const, 0, ""},
+ {"X86_64_RELOC_BRANCH", Const, 10, ""},
+ {"X86_64_RELOC_GOT", Const, 10, ""},
+ {"X86_64_RELOC_GOT_LOAD", Const, 10, ""},
+ {"X86_64_RELOC_SIGNED", Const, 10, ""},
+ {"X86_64_RELOC_SIGNED_1", Const, 10, ""},
+ {"X86_64_RELOC_SIGNED_2", Const, 10, ""},
+ {"X86_64_RELOC_SIGNED_4", Const, 10, ""},
+ {"X86_64_RELOC_SUBTRACTOR", Const, 10, ""},
+ {"X86_64_RELOC_TLV", Const, 10, ""},
+ {"X86_64_RELOC_UNSIGNED", Const, 10, ""},
+ },
+ "debug/pe": {
+ {"(*COFFSymbol).FullName", Method, 8, ""},
+ {"(*File).COFFSymbolReadSectionDefAux", Method, 19, ""},
+ {"(*File).Close", Method, 0, ""},
+ {"(*File).DWARF", Method, 0, ""},
+ {"(*File).ImportedLibraries", Method, 0, ""},
+ {"(*File).ImportedSymbols", Method, 0, ""},
+ {"(*File).Section", Method, 0, ""},
+ {"(*FormatError).Error", Method, 0, ""},
+ {"(*Section).Data", Method, 0, ""},
+ {"(*Section).Open", Method, 0, ""},
+ {"(Section).ReadAt", Method, 0, ""},
+ {"(StringTable).String", Method, 8, ""},
+ {"COFFSymbol", Type, 1, ""},
+ {"COFFSymbol.Name", Field, 1, ""},
+ {"COFFSymbol.NumberOfAuxSymbols", Field, 1, ""},
+ {"COFFSymbol.SectionNumber", Field, 1, ""},
+ {"COFFSymbol.StorageClass", Field, 1, ""},
+ {"COFFSymbol.Type", Field, 1, ""},
+ {"COFFSymbol.Value", Field, 1, ""},
+ {"COFFSymbolAuxFormat5", Type, 19, ""},
+ {"COFFSymbolAuxFormat5.Checksum", Field, 19, ""},
+ {"COFFSymbolAuxFormat5.NumLineNumbers", Field, 19, ""},
+ {"COFFSymbolAuxFormat5.NumRelocs", Field, 19, ""},
+ {"COFFSymbolAuxFormat5.SecNum", Field, 19, ""},
+ {"COFFSymbolAuxFormat5.Selection", Field, 19, ""},
+ {"COFFSymbolAuxFormat5.Size", Field, 19, ""},
+ {"COFFSymbolSize", Const, 1, ""},
+ {"DataDirectory", Type, 3, ""},
+ {"DataDirectory.Size", Field, 3, ""},
+ {"DataDirectory.VirtualAddress", Field, 3, ""},
+ {"File", Type, 0, ""},
+ {"File.COFFSymbols", Field, 8, ""},
+ {"File.FileHeader", Field, 0, ""},
+ {"File.OptionalHeader", Field, 3, ""},
+ {"File.Sections", Field, 0, ""},
+ {"File.StringTable", Field, 8, ""},
+ {"File.Symbols", Field, 1, ""},
+ {"FileHeader", Type, 0, ""},
+ {"FileHeader.Characteristics", Field, 0, ""},
+ {"FileHeader.Machine", Field, 0, ""},
+ {"FileHeader.NumberOfSections", Field, 0, ""},
+ {"FileHeader.NumberOfSymbols", Field, 0, ""},
+ {"FileHeader.PointerToSymbolTable", Field, 0, ""},
+ {"FileHeader.SizeOfOptionalHeader", Field, 0, ""},
+ {"FileHeader.TimeDateStamp", Field, 0, ""},
+ {"FormatError", Type, 0, ""},
+ {"IMAGE_COMDAT_SELECT_ANY", Const, 19, ""},
+ {"IMAGE_COMDAT_SELECT_ASSOCIATIVE", Const, 19, ""},
+ {"IMAGE_COMDAT_SELECT_EXACT_MATCH", Const, 19, ""},
+ {"IMAGE_COMDAT_SELECT_LARGEST", Const, 19, ""},
+ {"IMAGE_COMDAT_SELECT_NODUPLICATES", Const, 19, ""},
+ {"IMAGE_COMDAT_SELECT_SAME_SIZE", Const, 19, ""},
+ {"IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_BASERELOC", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_DEBUG", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_EXCEPTION", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_EXPORT", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_GLOBALPTR", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_IAT", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_IMPORT", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_RESOURCE", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_SECURITY", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_TLS", Const, 11, ""},
+ {"IMAGE_DLLCHARACTERISTICS_APPCONTAINER", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_GUARD_CF", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_NO_BIND", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_NO_SEH", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_NX_COMPAT", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", Const, 15, ""},
+ {"IMAGE_FILE_32BIT_MACHINE", Const, 15, ""},
+ {"IMAGE_FILE_AGGRESIVE_WS_TRIM", Const, 15, ""},
+ {"IMAGE_FILE_BYTES_REVERSED_HI", Const, 15, ""},
+ {"IMAGE_FILE_BYTES_REVERSED_LO", Const, 15, ""},
+ {"IMAGE_FILE_DEBUG_STRIPPED", Const, 15, ""},
+ {"IMAGE_FILE_DLL", Const, 15, ""},
+ {"IMAGE_FILE_EXECUTABLE_IMAGE", Const, 15, ""},
+ {"IMAGE_FILE_LARGE_ADDRESS_AWARE", Const, 15, ""},
+ {"IMAGE_FILE_LINE_NUMS_STRIPPED", Const, 15, ""},
+ {"IMAGE_FILE_LOCAL_SYMS_STRIPPED", Const, 15, ""},
+ {"IMAGE_FILE_MACHINE_AM33", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_AMD64", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_ARM", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_ARM64", Const, 11, ""},
+ {"IMAGE_FILE_MACHINE_ARMNT", Const, 12, ""},
+ {"IMAGE_FILE_MACHINE_EBC", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_I386", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_IA64", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_LOONGARCH32", Const, 19, ""},
+ {"IMAGE_FILE_MACHINE_LOONGARCH64", Const, 19, ""},
+ {"IMAGE_FILE_MACHINE_M32R", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_MIPS16", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_MIPSFPU", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_MIPSFPU16", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_POWERPC", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_POWERPCFP", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_R4000", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_RISCV128", Const, 20, ""},
+ {"IMAGE_FILE_MACHINE_RISCV32", Const, 20, ""},
+ {"IMAGE_FILE_MACHINE_RISCV64", Const, 20, ""},
+ {"IMAGE_FILE_MACHINE_SH3", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_SH3DSP", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_SH4", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_SH5", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_THUMB", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_UNKNOWN", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_WCEMIPSV2", Const, 0, ""},
+ {"IMAGE_FILE_NET_RUN_FROM_SWAP", Const, 15, ""},
+ {"IMAGE_FILE_RELOCS_STRIPPED", Const, 15, ""},
+ {"IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", Const, 15, ""},
+ {"IMAGE_FILE_SYSTEM", Const, 15, ""},
+ {"IMAGE_FILE_UP_SYSTEM_ONLY", Const, 15, ""},
+ {"IMAGE_SCN_CNT_CODE", Const, 19, ""},
+ {"IMAGE_SCN_CNT_INITIALIZED_DATA", Const, 19, ""},
+ {"IMAGE_SCN_CNT_UNINITIALIZED_DATA", Const, 19, ""},
+ {"IMAGE_SCN_LNK_COMDAT", Const, 19, ""},
+ {"IMAGE_SCN_MEM_DISCARDABLE", Const, 19, ""},
+ {"IMAGE_SCN_MEM_EXECUTE", Const, 19, ""},
+ {"IMAGE_SCN_MEM_READ", Const, 19, ""},
+ {"IMAGE_SCN_MEM_WRITE", Const, 19, ""},
+ {"IMAGE_SUBSYSTEM_EFI_APPLICATION", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_EFI_ROM", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_NATIVE", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_NATIVE_WINDOWS", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_OS2_CUI", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_POSIX_CUI", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_UNKNOWN", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_WINDOWS_CUI", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_WINDOWS_GUI", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_XBOX", Const, 15, ""},
+ {"ImportDirectory", Type, 0, ""},
+ {"ImportDirectory.FirstThunk", Field, 0, ""},
+ {"ImportDirectory.ForwarderChain", Field, 0, ""},
+ {"ImportDirectory.Name", Field, 0, ""},
+ {"ImportDirectory.OriginalFirstThunk", Field, 0, ""},
+ {"ImportDirectory.TimeDateStamp", Field, 0, ""},
+ {"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
+ {"Open", Func, 0, "func(name string) (*File, error)"},
+ {"OptionalHeader32", Type, 3, ""},
+ {"OptionalHeader32.AddressOfEntryPoint", Field, 3, ""},
+ {"OptionalHeader32.BaseOfCode", Field, 3, ""},
+ {"OptionalHeader32.BaseOfData", Field, 3, ""},
+ {"OptionalHeader32.CheckSum", Field, 3, ""},
+ {"OptionalHeader32.DataDirectory", Field, 3, ""},
+ {"OptionalHeader32.DllCharacteristics", Field, 3, ""},
+ {"OptionalHeader32.FileAlignment", Field, 3, ""},
+ {"OptionalHeader32.ImageBase", Field, 3, ""},
+ {"OptionalHeader32.LoaderFlags", Field, 3, ""},
+ {"OptionalHeader32.Magic", Field, 3, ""},
+ {"OptionalHeader32.MajorImageVersion", Field, 3, ""},
+ {"OptionalHeader32.MajorLinkerVersion", Field, 3, ""},
+ {"OptionalHeader32.MajorOperatingSystemVersion", Field, 3, ""},
+ {"OptionalHeader32.MajorSubsystemVersion", Field, 3, ""},
+ {"OptionalHeader32.MinorImageVersion", Field, 3, ""},
+ {"OptionalHeader32.MinorLinkerVersion", Field, 3, ""},
+ {"OptionalHeader32.MinorOperatingSystemVersion", Field, 3, ""},
+ {"OptionalHeader32.MinorSubsystemVersion", Field, 3, ""},
+ {"OptionalHeader32.NumberOfRvaAndSizes", Field, 3, ""},
+ {"OptionalHeader32.SectionAlignment", Field, 3, ""},
+ {"OptionalHeader32.SizeOfCode", Field, 3, ""},
+ {"OptionalHeader32.SizeOfHeaders", Field, 3, ""},
+ {"OptionalHeader32.SizeOfHeapCommit", Field, 3, ""},
+ {"OptionalHeader32.SizeOfHeapReserve", Field, 3, ""},
+ {"OptionalHeader32.SizeOfImage", Field, 3, ""},
+ {"OptionalHeader32.SizeOfInitializedData", Field, 3, ""},
+ {"OptionalHeader32.SizeOfStackCommit", Field, 3, ""},
+ {"OptionalHeader32.SizeOfStackReserve", Field, 3, ""},
+ {"OptionalHeader32.SizeOfUninitializedData", Field, 3, ""},
+ {"OptionalHeader32.Subsystem", Field, 3, ""},
+ {"OptionalHeader32.Win32VersionValue", Field, 3, ""},
+ {"OptionalHeader64", Type, 3, ""},
+ {"OptionalHeader64.AddressOfEntryPoint", Field, 3, ""},
+ {"OptionalHeader64.BaseOfCode", Field, 3, ""},
+ {"OptionalHeader64.CheckSum", Field, 3, ""},
+ {"OptionalHeader64.DataDirectory", Field, 3, ""},
+ {"OptionalHeader64.DllCharacteristics", Field, 3, ""},
+ {"OptionalHeader64.FileAlignment", Field, 3, ""},
+ {"OptionalHeader64.ImageBase", Field, 3, ""},
+ {"OptionalHeader64.LoaderFlags", Field, 3, ""},
+ {"OptionalHeader64.Magic", Field, 3, ""},
+ {"OptionalHeader64.MajorImageVersion", Field, 3, ""},
+ {"OptionalHeader64.MajorLinkerVersion", Field, 3, ""},
+ {"OptionalHeader64.MajorOperatingSystemVersion", Field, 3, ""},
+ {"OptionalHeader64.MajorSubsystemVersion", Field, 3, ""},
+ {"OptionalHeader64.MinorImageVersion", Field, 3, ""},
+ {"OptionalHeader64.MinorLinkerVersion", Field, 3, ""},
+ {"OptionalHeader64.MinorOperatingSystemVersion", Field, 3, ""},
+ {"OptionalHeader64.MinorSubsystemVersion", Field, 3, ""},
+ {"OptionalHeader64.NumberOfRvaAndSizes", Field, 3, ""},
+ {"OptionalHeader64.SectionAlignment", Field, 3, ""},
+ {"OptionalHeader64.SizeOfCode", Field, 3, ""},
+ {"OptionalHeader64.SizeOfHeaders", Field, 3, ""},
+ {"OptionalHeader64.SizeOfHeapCommit", Field, 3, ""},
+ {"OptionalHeader64.SizeOfHeapReserve", Field, 3, ""},
+ {"OptionalHeader64.SizeOfImage", Field, 3, ""},
+ {"OptionalHeader64.SizeOfInitializedData", Field, 3, ""},
+ {"OptionalHeader64.SizeOfStackCommit", Field, 3, ""},
+ {"OptionalHeader64.SizeOfStackReserve", Field, 3, ""},
+ {"OptionalHeader64.SizeOfUninitializedData", Field, 3, ""},
+ {"OptionalHeader64.Subsystem", Field, 3, ""},
+ {"OptionalHeader64.Win32VersionValue", Field, 3, ""},
+ {"Reloc", Type, 8, ""},
+ {"Reloc.SymbolTableIndex", Field, 8, ""},
+ {"Reloc.Type", Field, 8, ""},
+ {"Reloc.VirtualAddress", Field, 8, ""},
+ {"Section", Type, 0, ""},
+ {"Section.ReaderAt", Field, 0, ""},
+ {"Section.Relocs", Field, 8, ""},
+ {"Section.SectionHeader", Field, 0, ""},
+ {"SectionHeader", Type, 0, ""},
+ {"SectionHeader.Characteristics", Field, 0, ""},
+ {"SectionHeader.Name", Field, 0, ""},
+ {"SectionHeader.NumberOfLineNumbers", Field, 0, ""},
+ {"SectionHeader.NumberOfRelocations", Field, 0, ""},
+ {"SectionHeader.Offset", Field, 0, ""},
+ {"SectionHeader.PointerToLineNumbers", Field, 0, ""},
+ {"SectionHeader.PointerToRelocations", Field, 0, ""},
+ {"SectionHeader.Size", Field, 0, ""},
+ {"SectionHeader.VirtualAddress", Field, 0, ""},
+ {"SectionHeader.VirtualSize", Field, 0, ""},
+ {"SectionHeader32", Type, 0, ""},
+ {"SectionHeader32.Characteristics", Field, 0, ""},
+ {"SectionHeader32.Name", Field, 0, ""},
+ {"SectionHeader32.NumberOfLineNumbers", Field, 0, ""},
+ {"SectionHeader32.NumberOfRelocations", Field, 0, ""},
+ {"SectionHeader32.PointerToLineNumbers", Field, 0, ""},
+ {"SectionHeader32.PointerToRawData", Field, 0, ""},
+ {"SectionHeader32.PointerToRelocations", Field, 0, ""},
+ {"SectionHeader32.SizeOfRawData", Field, 0, ""},
+ {"SectionHeader32.VirtualAddress", Field, 0, ""},
+ {"SectionHeader32.VirtualSize", Field, 0, ""},
+ {"StringTable", Type, 8, ""},
+ {"Symbol", Type, 1, ""},
+ {"Symbol.Name", Field, 1, ""},
+ {"Symbol.SectionNumber", Field, 1, ""},
+ {"Symbol.StorageClass", Field, 1, ""},
+ {"Symbol.Type", Field, 1, ""},
+ {"Symbol.Value", Field, 1, ""},
+ },
+ "debug/plan9obj": {
+ {"(*File).Close", Method, 3, ""},
+ {"(*File).Section", Method, 3, ""},
+ {"(*File).Symbols", Method, 3, ""},
+ {"(*Section).Data", Method, 3, ""},
+ {"(*Section).Open", Method, 3, ""},
+ {"(Section).ReadAt", Method, 3, ""},
+ {"ErrNoSymbols", Var, 18, ""},
+ {"File", Type, 3, ""},
+ {"File.FileHeader", Field, 3, ""},
+ {"File.Sections", Field, 3, ""},
+ {"FileHeader", Type, 3, ""},
+ {"FileHeader.Bss", Field, 3, ""},
+ {"FileHeader.Entry", Field, 3, ""},
+ {"FileHeader.HdrSize", Field, 4, ""},
+ {"FileHeader.LoadAddress", Field, 4, ""},
+ {"FileHeader.Magic", Field, 3, ""},
+ {"FileHeader.PtrSize", Field, 3, ""},
+ {"Magic386", Const, 3, ""},
+ {"Magic64", Const, 3, ""},
+ {"MagicAMD64", Const, 3, ""},
+ {"MagicARM", Const, 3, ""},
+ {"NewFile", Func, 3, "func(r io.ReaderAt) (*File, error)"},
+ {"Open", Func, 3, "func(name string) (*File, error)"},
+ {"Section", Type, 3, ""},
+ {"Section.ReaderAt", Field, 3, ""},
+ {"Section.SectionHeader", Field, 3, ""},
+ {"SectionHeader", Type, 3, ""},
+ {"SectionHeader.Name", Field, 3, ""},
+ {"SectionHeader.Offset", Field, 3, ""},
+ {"SectionHeader.Size", Field, 3, ""},
+ {"Sym", Type, 3, ""},
+ {"Sym.Name", Field, 3, ""},
+ {"Sym.Type", Field, 3, ""},
+ {"Sym.Value", Field, 3, ""},
+ },
+ "embed": {
+ {"(FS).Open", Method, 16, ""},
+ {"(FS).ReadDir", Method, 16, ""},
+ {"(FS).ReadFile", Method, 16, ""},
+ {"FS", Type, 16, ""},
+ },
+ "encoding": {
+ {"BinaryAppender", Type, 24, ""},
+ {"BinaryMarshaler", Type, 2, ""},
+ {"BinaryUnmarshaler", Type, 2, ""},
+ {"TextAppender", Type, 24, ""},
+ {"TextMarshaler", Type, 2, ""},
+ {"TextUnmarshaler", Type, 2, ""},
+ },
+ "encoding/ascii85": {
+ {"(CorruptInputError).Error", Method, 0, ""},
+ {"CorruptInputError", Type, 0, ""},
+ {"Decode", Func, 0, "func(dst []byte, src []byte, flush bool) (ndst int, nsrc int, err error)"},
+ {"Encode", Func, 0, "func(dst []byte, src []byte) int"},
+ {"MaxEncodedLen", Func, 0, "func(n int) int"},
+ {"NewDecoder", Func, 0, "func(r io.Reader) io.Reader"},
+ {"NewEncoder", Func, 0, "func(w io.Writer) io.WriteCloser"},
+ },
+ "encoding/asn1": {
+ {"(BitString).At", Method, 0, ""},
+ {"(BitString).RightAlign", Method, 0, ""},
+ {"(ObjectIdentifier).Equal", Method, 0, ""},
+ {"(ObjectIdentifier).String", Method, 3, ""},
+ {"(StructuralError).Error", Method, 0, ""},
+ {"(SyntaxError).Error", Method, 0, ""},
+ {"BitString", Type, 0, ""},
+ {"BitString.BitLength", Field, 0, ""},
+ {"BitString.Bytes", Field, 0, ""},
+ {"ClassApplication", Const, 6, ""},
+ {"ClassContextSpecific", Const, 6, ""},
+ {"ClassPrivate", Const, 6, ""},
+ {"ClassUniversal", Const, 6, ""},
+ {"Enumerated", Type, 0, ""},
+ {"Flag", Type, 0, ""},
+ {"Marshal", Func, 0, "func(val any) ([]byte, error)"},
+ {"MarshalWithParams", Func, 10, "func(val any, params string) ([]byte, error)"},
+ {"NullBytes", Var, 9, ""},
+ {"NullRawValue", Var, 9, ""},
+ {"ObjectIdentifier", Type, 0, ""},
+ {"RawContent", Type, 0, ""},
+ {"RawValue", Type, 0, ""},
+ {"RawValue.Bytes", Field, 0, ""},
+ {"RawValue.Class", Field, 0, ""},
+ {"RawValue.FullBytes", Field, 0, ""},
+ {"RawValue.IsCompound", Field, 0, ""},
+ {"RawValue.Tag", Field, 0, ""},
+ {"StructuralError", Type, 0, ""},
+ {"StructuralError.Msg", Field, 0, ""},
+ {"SyntaxError", Type, 0, ""},
+ {"SyntaxError.Msg", Field, 0, ""},
+ {"TagBMPString", Const, 14, ""},
+ {"TagBitString", Const, 6, ""},
+ {"TagBoolean", Const, 6, ""},
+ {"TagEnum", Const, 6, ""},
+ {"TagGeneralString", Const, 6, ""},
+ {"TagGeneralizedTime", Const, 6, ""},
+ {"TagIA5String", Const, 6, ""},
+ {"TagInteger", Const, 6, ""},
+ {"TagNull", Const, 9, ""},
+ {"TagNumericString", Const, 10, ""},
+ {"TagOID", Const, 6, ""},
+ {"TagOctetString", Const, 6, ""},
+ {"TagPrintableString", Const, 6, ""},
+ {"TagSequence", Const, 6, ""},
+ {"TagSet", Const, 6, ""},
+ {"TagT61String", Const, 6, ""},
+ {"TagUTCTime", Const, 6, ""},
+ {"TagUTF8String", Const, 6, ""},
+ {"Unmarshal", Func, 0, "func(b []byte, val any) (rest []byte, err error)"},
+ {"UnmarshalWithParams", Func, 0, "func(b []byte, val any, params string) (rest []byte, err error)"},
+ },
+ "encoding/base32": {
+ {"(*Encoding).AppendDecode", Method, 22, ""},
+ {"(*Encoding).AppendEncode", Method, 22, ""},
+ {"(*Encoding).Decode", Method, 0, ""},
+ {"(*Encoding).DecodeString", Method, 0, ""},
+ {"(*Encoding).DecodedLen", Method, 0, ""},
+ {"(*Encoding).Encode", Method, 0, ""},
+ {"(*Encoding).EncodeToString", Method, 0, ""},
+ {"(*Encoding).EncodedLen", Method, 0, ""},
+ {"(CorruptInputError).Error", Method, 0, ""},
+ {"(Encoding).WithPadding", Method, 9, ""},
+ {"CorruptInputError", Type, 0, ""},
+ {"Encoding", Type, 0, ""},
+ {"HexEncoding", Var, 0, ""},
+ {"NewDecoder", Func, 0, "func(enc *Encoding, r io.Reader) io.Reader"},
+ {"NewEncoder", Func, 0, "func(enc *Encoding, w io.Writer) io.WriteCloser"},
+ {"NewEncoding", Func, 0, "func(encoder string) *Encoding"},
+ {"NoPadding", Const, 9, ""},
+ {"StdEncoding", Var, 0, ""},
+ {"StdPadding", Const, 9, ""},
+ },
+ "encoding/base64": {
+ {"(*Encoding).AppendDecode", Method, 22, ""},
+ {"(*Encoding).AppendEncode", Method, 22, ""},
+ {"(*Encoding).Decode", Method, 0, ""},
+ {"(*Encoding).DecodeString", Method, 0, ""},
+ {"(*Encoding).DecodedLen", Method, 0, ""},
+ {"(*Encoding).Encode", Method, 0, ""},
+ {"(*Encoding).EncodeToString", Method, 0, ""},
+ {"(*Encoding).EncodedLen", Method, 0, ""},
+ {"(CorruptInputError).Error", Method, 0, ""},
+ {"(Encoding).Strict", Method, 8, ""},
+ {"(Encoding).WithPadding", Method, 5, ""},
+ {"CorruptInputError", Type, 0, ""},
+ {"Encoding", Type, 0, ""},
+ {"NewDecoder", Func, 0, "func(enc *Encoding, r io.Reader) io.Reader"},
+ {"NewEncoder", Func, 0, "func(enc *Encoding, w io.Writer) io.WriteCloser"},
+ {"NewEncoding", Func, 0, "func(encoder string) *Encoding"},
+ {"NoPadding", Const, 5, ""},
+ {"RawStdEncoding", Var, 5, ""},
+ {"RawURLEncoding", Var, 5, ""},
+ {"StdEncoding", Var, 0, ""},
+ {"StdPadding", Const, 5, ""},
+ {"URLEncoding", Var, 0, ""},
+ },
+ "encoding/binary": {
+ {"Append", Func, 23, "func(buf []byte, order ByteOrder, data any) ([]byte, error)"},
+ {"AppendByteOrder", Type, 19, ""},
+ {"AppendUvarint", Func, 19, "func(buf []byte, x uint64) []byte"},
+ {"AppendVarint", Func, 19, "func(buf []byte, x int64) []byte"},
+ {"BigEndian", Var, 0, ""},
+ {"ByteOrder", Type, 0, ""},
+ {"Decode", Func, 23, "func(buf []byte, order ByteOrder, data any) (int, error)"},
+ {"Encode", Func, 23, "func(buf []byte, order ByteOrder, data any) (int, error)"},
+ {"LittleEndian", Var, 0, ""},
+ {"MaxVarintLen16", Const, 0, ""},
+ {"MaxVarintLen32", Const, 0, ""},
+ {"MaxVarintLen64", Const, 0, ""},
+ {"NativeEndian", Var, 21, ""},
+ {"PutUvarint", Func, 0, "func(buf []byte, x uint64) int"},
+ {"PutVarint", Func, 0, "func(buf []byte, x int64) int"},
+ {"Read", Func, 0, "func(r io.Reader, order ByteOrder, data any) error"},
+ {"ReadUvarint", Func, 0, "func(r io.ByteReader) (uint64, error)"},
+ {"ReadVarint", Func, 0, "func(r io.ByteReader) (int64, error)"},
+ {"Size", Func, 0, "func(v any) int"},
+ {"Uvarint", Func, 0, "func(buf []byte) (uint64, int)"},
+ {"Varint", Func, 0, "func(buf []byte) (int64, int)"},
+ {"Write", Func, 0, "func(w io.Writer, order ByteOrder, data any) error"},
+ },
+ "encoding/csv": {
+ {"(*ParseError).Error", Method, 0, ""},
+ {"(*ParseError).Unwrap", Method, 13, ""},
+ {"(*Reader).FieldPos", Method, 17, ""},
+ {"(*Reader).InputOffset", Method, 19, ""},
+ {"(*Reader).Read", Method, 0, ""},
+ {"(*Reader).ReadAll", Method, 0, ""},
+ {"(*Writer).Error", Method, 1, ""},
+ {"(*Writer).Flush", Method, 0, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"(*Writer).WriteAll", Method, 0, ""},
+ {"ErrBareQuote", Var, 0, ""},
+ {"ErrFieldCount", Var, 0, ""},
+ {"ErrQuote", Var, 0, ""},
+ {"ErrTrailingComma", Var, 0, ""},
+ {"NewReader", Func, 0, "func(r io.Reader) *Reader"},
+ {"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+ {"ParseError", Type, 0, ""},
+ {"ParseError.Column", Field, 0, ""},
+ {"ParseError.Err", Field, 0, ""},
+ {"ParseError.Line", Field, 0, ""},
+ {"ParseError.StartLine", Field, 10, ""},
+ {"Reader", Type, 0, ""},
+ {"Reader.Comma", Field, 0, ""},
+ {"Reader.Comment", Field, 0, ""},
+ {"Reader.FieldsPerRecord", Field, 0, ""},
+ {"Reader.LazyQuotes", Field, 0, ""},
+ {"Reader.ReuseRecord", Field, 9, ""},
+ {"Reader.TrailingComma", Field, 0, ""},
+ {"Reader.TrimLeadingSpace", Field, 0, ""},
+ {"Writer", Type, 0, ""},
+ {"Writer.Comma", Field, 0, ""},
+ {"Writer.UseCRLF", Field, 0, ""},
+ },
+ "encoding/gob": {
+ {"(*Decoder).Decode", Method, 0, ""},
+ {"(*Decoder).DecodeValue", Method, 0, ""},
+ {"(*Encoder).Encode", Method, 0, ""},
+ {"(*Encoder).EncodeValue", Method, 0, ""},
+ {"CommonType", Type, 0, ""},
+ {"CommonType.Id", Field, 0, ""},
+ {"CommonType.Name", Field, 0, ""},
+ {"Decoder", Type, 0, ""},
+ {"Encoder", Type, 0, ""},
+ {"GobDecoder", Type, 0, ""},
+ {"GobEncoder", Type, 0, ""},
+ {"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
+ {"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
+ {"Register", Func, 0, "func(value any)"},
+ {"RegisterName", Func, 0, "func(name string, value any)"},
+ },
+ "encoding/hex": {
+ {"(InvalidByteError).Error", Method, 0, ""},
+ {"AppendDecode", Func, 22, "func(dst []byte, src []byte) ([]byte, error)"},
+ {"AppendEncode", Func, 22, "func(dst []byte, src []byte) []byte"},
+ {"Decode", Func, 0, "func(dst []byte, src []byte) (int, error)"},
+ {"DecodeString", Func, 0, "func(s string) ([]byte, error)"},
+ {"DecodedLen", Func, 0, "func(x int) int"},
+ {"Dump", Func, 0, "func(data []byte) string"},
+ {"Dumper", Func, 0, "func(w io.Writer) io.WriteCloser"},
+ {"Encode", Func, 0, "func(dst []byte, src []byte) int"},
+ {"EncodeToString", Func, 0, "func(src []byte) string"},
+ {"EncodedLen", Func, 0, "func(n int) int"},
+ {"ErrLength", Var, 0, ""},
+ {"InvalidByteError", Type, 0, ""},
+ {"NewDecoder", Func, 10, "func(r io.Reader) io.Reader"},
+ {"NewEncoder", Func, 10, "func(w io.Writer) io.Writer"},
+ },
+ "encoding/json": {
+ {"(*Decoder).Buffered", Method, 1, ""},
+ {"(*Decoder).Decode", Method, 0, ""},
+ {"(*Decoder).DisallowUnknownFields", Method, 10, ""},
+ {"(*Decoder).InputOffset", Method, 14, ""},
+ {"(*Decoder).More", Method, 5, ""},
+ {"(*Decoder).Token", Method, 5, ""},
+ {"(*Decoder).UseNumber", Method, 1, ""},
+ {"(*Encoder).Encode", Method, 0, ""},
+ {"(*Encoder).SetEscapeHTML", Method, 7, ""},
+ {"(*Encoder).SetIndent", Method, 7, ""},
+ {"(*InvalidUTF8Error).Error", Method, 0, ""},
+ {"(*InvalidUnmarshalError).Error", Method, 0, ""},
+ {"(*MarshalerError).Error", Method, 0, ""},
+ {"(*MarshalerError).Unwrap", Method, 13, ""},
+ {"(*RawMessage).MarshalJSON", Method, 0, ""},
+ {"(*RawMessage).UnmarshalJSON", Method, 0, ""},
+ {"(*SyntaxError).Error", Method, 0, ""},
+ {"(*UnmarshalFieldError).Error", Method, 0, ""},
+ {"(*UnmarshalTypeError).Error", Method, 0, ""},
+ {"(*UnsupportedTypeError).Error", Method, 0, ""},
+ {"(*UnsupportedValueError).Error", Method, 0, ""},
+ {"(Delim).String", Method, 5, ""},
+ {"(Number).Float64", Method, 1, ""},
+ {"(Number).Int64", Method, 1, ""},
+ {"(Number).String", Method, 1, ""},
+ {"(RawMessage).MarshalJSON", Method, 8, ""},
+ {"Compact", Func, 0, "func(dst *bytes.Buffer, src []byte) error"},
+ {"Decoder", Type, 0, ""},
+ {"Delim", Type, 5, ""},
+ {"Encoder", Type, 0, ""},
+ {"HTMLEscape", Func, 0, "func(dst *bytes.Buffer, src []byte)"},
+ {"Indent", Func, 0, "func(dst *bytes.Buffer, src []byte, prefix string, indent string) error"},
+ {"InvalidUTF8Error", Type, 0, ""},
+ {"InvalidUTF8Error.S", Field, 0, ""},
+ {"InvalidUnmarshalError", Type, 0, ""},
+ {"InvalidUnmarshalError.Type", Field, 0, ""},
+ {"Marshal", Func, 0, "func(v any) ([]byte, error)"},
+ {"MarshalIndent", Func, 0, "func(v any, prefix string, indent string) ([]byte, error)"},
+ {"Marshaler", Type, 0, ""},
+ {"MarshalerError", Type, 0, ""},
+ {"MarshalerError.Err", Field, 0, ""},
+ {"MarshalerError.Type", Field, 0, ""},
+ {"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
+ {"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
+ {"Number", Type, 1, ""},
+ {"RawMessage", Type, 0, ""},
+ {"SyntaxError", Type, 0, ""},
+ {"SyntaxError.Offset", Field, 0, ""},
+ {"Token", Type, 5, ""},
+ {"Unmarshal", Func, 0, "func(data []byte, v any) error"},
+ {"UnmarshalFieldError", Type, 0, ""},
+ {"UnmarshalFieldError.Field", Field, 0, ""},
+ {"UnmarshalFieldError.Key", Field, 0, ""},
+ {"UnmarshalFieldError.Type", Field, 0, ""},
+ {"UnmarshalTypeError", Type, 0, ""},
+ {"UnmarshalTypeError.Field", Field, 8, ""},
+ {"UnmarshalTypeError.Offset", Field, 5, ""},
+ {"UnmarshalTypeError.Struct", Field, 8, ""},
+ {"UnmarshalTypeError.Type", Field, 0, ""},
+ {"UnmarshalTypeError.Value", Field, 0, ""},
+ {"Unmarshaler", Type, 0, ""},
+ {"UnsupportedTypeError", Type, 0, ""},
+ {"UnsupportedTypeError.Type", Field, 0, ""},
+ {"UnsupportedValueError", Type, 0, ""},
+ {"UnsupportedValueError.Str", Field, 0, ""},
+ {"UnsupportedValueError.Value", Field, 0, ""},
+ {"Valid", Func, 9, "func(data []byte) bool"},
+ },
+ "encoding/pem": {
+ {"Block", Type, 0, ""},
+ {"Block.Bytes", Field, 0, ""},
+ {"Block.Headers", Field, 0, ""},
+ {"Block.Type", Field, 0, ""},
+ {"Decode", Func, 0, "func(data []byte) (p *Block, rest []byte)"},
+ {"Encode", Func, 0, "func(out io.Writer, b *Block) error"},
+ {"EncodeToMemory", Func, 0, "func(b *Block) []byte"},
+ },
+ "encoding/xml": {
+ {"(*Decoder).Decode", Method, 0, ""},
+ {"(*Decoder).DecodeElement", Method, 0, ""},
+ {"(*Decoder).InputOffset", Method, 4, ""},
+ {"(*Decoder).InputPos", Method, 19, ""},
+ {"(*Decoder).RawToken", Method, 0, ""},
+ {"(*Decoder).Skip", Method, 0, ""},
+ {"(*Decoder).Token", Method, 0, ""},
+ {"(*Encoder).Close", Method, 20, ""},
+ {"(*Encoder).Encode", Method, 0, ""},
+ {"(*Encoder).EncodeElement", Method, 2, ""},
+ {"(*Encoder).EncodeToken", Method, 2, ""},
+ {"(*Encoder).Flush", Method, 2, ""},
+ {"(*Encoder).Indent", Method, 1, ""},
+ {"(*SyntaxError).Error", Method, 0, ""},
+ {"(*TagPathError).Error", Method, 0, ""},
+ {"(*UnsupportedTypeError).Error", Method, 0, ""},
+ {"(CharData).Copy", Method, 0, ""},
+ {"(Comment).Copy", Method, 0, ""},
+ {"(Directive).Copy", Method, 0, ""},
+ {"(ProcInst).Copy", Method, 0, ""},
+ {"(StartElement).Copy", Method, 0, ""},
+ {"(StartElement).End", Method, 2, ""},
+ {"(UnmarshalError).Error", Method, 0, ""},
+ {"Attr", Type, 0, ""},
+ {"Attr.Name", Field, 0, ""},
+ {"Attr.Value", Field, 0, ""},
+ {"CharData", Type, 0, ""},
+ {"Comment", Type, 0, ""},
+ {"CopyToken", Func, 0, "func(t Token) Token"},
+ {"Decoder", Type, 0, ""},
+ {"Decoder.AutoClose", Field, 0, ""},
+ {"Decoder.CharsetReader", Field, 0, ""},
+ {"Decoder.DefaultSpace", Field, 1, ""},
+ {"Decoder.Entity", Field, 0, ""},
+ {"Decoder.Strict", Field, 0, ""},
+ {"Directive", Type, 0, ""},
+ {"Encoder", Type, 0, ""},
+ {"EndElement", Type, 0, ""},
+ {"EndElement.Name", Field, 0, ""},
+ {"Escape", Func, 0, "func(w io.Writer, s []byte)"},
+ {"EscapeText", Func, 1, "func(w io.Writer, s []byte) error"},
+ {"HTMLAutoClose", Var, 0, ""},
+ {"HTMLEntity", Var, 0, ""},
+ {"Header", Const, 0, ""},
+ {"Marshal", Func, 0, "func(v any) ([]byte, error)"},
+ {"MarshalIndent", Func, 0, "func(v any, prefix string, indent string) ([]byte, error)"},
+ {"Marshaler", Type, 2, ""},
+ {"MarshalerAttr", Type, 2, ""},
+ {"Name", Type, 0, ""},
+ {"Name.Local", Field, 0, ""},
+ {"Name.Space", Field, 0, ""},
+ {"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
+ {"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
+ {"NewTokenDecoder", Func, 10, "func(t TokenReader) *Decoder"},
+ {"ProcInst", Type, 0, ""},
+ {"ProcInst.Inst", Field, 0, ""},
+ {"ProcInst.Target", Field, 0, ""},
+ {"StartElement", Type, 0, ""},
+ {"StartElement.Attr", Field, 0, ""},
+ {"StartElement.Name", Field, 0, ""},
+ {"SyntaxError", Type, 0, ""},
+ {"SyntaxError.Line", Field, 0, ""},
+ {"SyntaxError.Msg", Field, 0, ""},
+ {"TagPathError", Type, 0, ""},
+ {"TagPathError.Field1", Field, 0, ""},
+ {"TagPathError.Field2", Field, 0, ""},
+ {"TagPathError.Struct", Field, 0, ""},
+ {"TagPathError.Tag1", Field, 0, ""},
+ {"TagPathError.Tag2", Field, 0, ""},
+ {"Token", Type, 0, ""},
+ {"TokenReader", Type, 10, ""},
+ {"Unmarshal", Func, 0, "func(data []byte, v any) error"},
+ {"UnmarshalError", Type, 0, ""},
+ {"Unmarshaler", Type, 2, ""},
+ {"UnmarshalerAttr", Type, 2, ""},
+ {"UnsupportedTypeError", Type, 0, ""},
+ {"UnsupportedTypeError.Type", Field, 0, ""},
+ },
+ "errors": {
+ {"As", Func, 13, "func(err error, target any) bool"},
+ {"ErrUnsupported", Var, 21, ""},
+ {"Is", Func, 13, "func(err error, target error) bool"},
+ {"Join", Func, 20, "func(errs ...error) error"},
+ {"New", Func, 0, "func(text string) error"},
+ {"Unwrap", Func, 13, "func(err error) error"},
+ },
+ "expvar": {
+ {"(*Float).Add", Method, 0, ""},
+ {"(*Float).Set", Method, 0, ""},
+ {"(*Float).String", Method, 0, ""},
+ {"(*Float).Value", Method, 8, ""},
+ {"(*Int).Add", Method, 0, ""},
+ {"(*Int).Set", Method, 0, ""},
+ {"(*Int).String", Method, 0, ""},
+ {"(*Int).Value", Method, 8, ""},
+ {"(*Map).Add", Method, 0, ""},
+ {"(*Map).AddFloat", Method, 0, ""},
+ {"(*Map).Delete", Method, 12, ""},
+ {"(*Map).Do", Method, 0, ""},
+ {"(*Map).Get", Method, 0, ""},
+ {"(*Map).Init", Method, 0, ""},
+ {"(*Map).Set", Method, 0, ""},
+ {"(*Map).String", Method, 0, ""},
+ {"(*String).Set", Method, 0, ""},
+ {"(*String).String", Method, 0, ""},
+ {"(*String).Value", Method, 8, ""},
+ {"(Func).String", Method, 0, ""},
+ {"(Func).Value", Method, 8, ""},
+ {"Do", Func, 0, "func(f func(KeyValue))"},
+ {"Float", Type, 0, ""},
+ {"Func", Type, 0, ""},
+ {"Get", Func, 0, "func(name string) Var"},
+ {"Handler", Func, 8, "func() http.Handler"},
+ {"Int", Type, 0, ""},
+ {"KeyValue", Type, 0, ""},
+ {"KeyValue.Key", Field, 0, ""},
+ {"KeyValue.Value", Field, 0, ""},
+ {"Map", Type, 0, ""},
+ {"NewFloat", Func, 0, "func(name string) *Float"},
+ {"NewInt", Func, 0, "func(name string) *Int"},
+ {"NewMap", Func, 0, "func(name string) *Map"},
+ {"NewString", Func, 0, "func(name string) *String"},
+ {"Publish", Func, 0, "func(name string, v Var)"},
+ {"String", Type, 0, ""},
+ {"Var", Type, 0, ""},
+ },
+ "flag": {
+ {"(*FlagSet).Arg", Method, 0, ""},
+ {"(*FlagSet).Args", Method, 0, ""},
+ {"(*FlagSet).Bool", Method, 0, ""},
+ {"(*FlagSet).BoolFunc", Method, 21, ""},
+ {"(*FlagSet).BoolVar", Method, 0, ""},
+ {"(*FlagSet).Duration", Method, 0, ""},
+ {"(*FlagSet).DurationVar", Method, 0, ""},
+ {"(*FlagSet).ErrorHandling", Method, 10, ""},
+ {"(*FlagSet).Float64", Method, 0, ""},
+ {"(*FlagSet).Float64Var", Method, 0, ""},
+ {"(*FlagSet).Func", Method, 16, ""},
+ {"(*FlagSet).Init", Method, 0, ""},
+ {"(*FlagSet).Int", Method, 0, ""},
+ {"(*FlagSet).Int64", Method, 0, ""},
+ {"(*FlagSet).Int64Var", Method, 0, ""},
+ {"(*FlagSet).IntVar", Method, 0, ""},
+ {"(*FlagSet).Lookup", Method, 0, ""},
+ {"(*FlagSet).NArg", Method, 0, ""},
+ {"(*FlagSet).NFlag", Method, 0, ""},
+ {"(*FlagSet).Name", Method, 10, ""},
+ {"(*FlagSet).Output", Method, 10, ""},
+ {"(*FlagSet).Parse", Method, 0, ""},
+ {"(*FlagSet).Parsed", Method, 0, ""},
+ {"(*FlagSet).PrintDefaults", Method, 0, ""},
+ {"(*FlagSet).Set", Method, 0, ""},
+ {"(*FlagSet).SetOutput", Method, 0, ""},
+ {"(*FlagSet).String", Method, 0, ""},
+ {"(*FlagSet).StringVar", Method, 0, ""},
+ {"(*FlagSet).TextVar", Method, 19, ""},
+ {"(*FlagSet).Uint", Method, 0, ""},
+ {"(*FlagSet).Uint64", Method, 0, ""},
+ {"(*FlagSet).Uint64Var", Method, 0, ""},
+ {"(*FlagSet).UintVar", Method, 0, ""},
+ {"(*FlagSet).Var", Method, 0, ""},
+ {"(*FlagSet).Visit", Method, 0, ""},
+ {"(*FlagSet).VisitAll", Method, 0, ""},
+ {"Arg", Func, 0, "func(i int) string"},
+ {"Args", Func, 0, "func() []string"},
+ {"Bool", Func, 0, "func(name string, value bool, usage string) *bool"},
+ {"BoolFunc", Func, 21, "func(name string, usage string, fn func(string) error)"},
+ {"BoolVar", Func, 0, "func(p *bool, name string, value bool, usage string)"},
+ {"CommandLine", Var, 2, ""},
+ {"ContinueOnError", Const, 0, ""},
+ {"Duration", Func, 0, "func(name string, value time.Duration, usage string) *time.Duration"},
+ {"DurationVar", Func, 0, "func(p *time.Duration, name string, value time.Duration, usage string)"},
+ {"ErrHelp", Var, 0, ""},
+ {"ErrorHandling", Type, 0, ""},
+ {"ExitOnError", Const, 0, ""},
+ {"Flag", Type, 0, ""},
+ {"Flag.DefValue", Field, 0, ""},
+ {"Flag.Name", Field, 0, ""},
+ {"Flag.Usage", Field, 0, ""},
+ {"Flag.Value", Field, 0, ""},
+ {"FlagSet", Type, 0, ""},
+ {"FlagSet.Usage", Field, 0, ""},
+ {"Float64", Func, 0, "func(name string, value float64, usage string) *float64"},
+ {"Float64Var", Func, 0, "func(p *float64, name string, value float64, usage string)"},
+ {"Func", Func, 16, "func(name string, usage string, fn func(string) error)"},
+ {"Getter", Type, 2, ""},
+ {"Int", Func, 0, "func(name string, value int, usage string) *int"},
+ {"Int64", Func, 0, "func(name string, value int64, usage string) *int64"},
+ {"Int64Var", Func, 0, "func(p *int64, name string, value int64, usage string)"},
+ {"IntVar", Func, 0, "func(p *int, name string, value int, usage string)"},
+ {"Lookup", Func, 0, "func(name string) *Flag"},
+ {"NArg", Func, 0, "func() int"},
+ {"NFlag", Func, 0, "func() int"},
+ {"NewFlagSet", Func, 0, "func(name string, errorHandling ErrorHandling) *FlagSet"},
+ {"PanicOnError", Const, 0, ""},
+ {"Parse", Func, 0, "func()"},
+ {"Parsed", Func, 0, "func() bool"},
+ {"PrintDefaults", Func, 0, "func()"},
+ {"Set", Func, 0, "func(name string, value string) error"},
+ {"String", Func, 0, "func(name string, value string, usage string) *string"},
+ {"StringVar", Func, 0, "func(p *string, name string, value string, usage string)"},
+ {"TextVar", Func, 19, "func(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string)"},
+ {"Uint", Func, 0, "func(name string, value uint, usage string) *uint"},
+ {"Uint64", Func, 0, "func(name string, value uint64, usage string) *uint64"},
+ {"Uint64Var", Func, 0, "func(p *uint64, name string, value uint64, usage string)"},
+ {"UintVar", Func, 0, "func(p *uint, name string, value uint, usage string)"},
+ {"UnquoteUsage", Func, 5, "func(flag *Flag) (name string, usage string)"},
+ {"Usage", Var, 0, ""},
+ {"Value", Type, 0, ""},
+ {"Var", Func, 0, "func(value Value, name string, usage string)"},
+ {"Visit", Func, 0, "func(fn func(*Flag))"},
+ {"VisitAll", Func, 0, "func(fn func(*Flag))"},
+ },
+ "fmt": {
+ {"Append", Func, 19, "func(b []byte, a ...any) []byte"},
+ {"Appendf", Func, 19, "func(b []byte, format string, a ...any) []byte"},
+ {"Appendln", Func, 19, "func(b []byte, a ...any) []byte"},
+ {"Errorf", Func, 0, "func(format string, a ...any) error"},
+ {"FormatString", Func, 20, "func(state State, verb rune) string"},
+ {"Formatter", Type, 0, ""},
+ {"Fprint", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"},
+ {"Fprintf", Func, 0, "func(w io.Writer, format string, a ...any) (n int, err error)"},
+ {"Fprintln", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"},
+ {"Fscan", Func, 0, "func(r io.Reader, a ...any) (n int, err error)"},
+ {"Fscanf", Func, 0, "func(r io.Reader, format string, a ...any) (n int, err error)"},
+ {"Fscanln", Func, 0, "func(r io.Reader, a ...any) (n int, err error)"},
+ {"GoStringer", Type, 0, ""},
+ {"Print", Func, 0, "func(a ...any) (n int, err error)"},
+ {"Printf", Func, 0, "func(format string, a ...any) (n int, err error)"},
+ {"Println", Func, 0, "func(a ...any) (n int, err error)"},
+ {"Scan", Func, 0, "func(a ...any) (n int, err error)"},
+ {"ScanState", Type, 0, ""},
+ {"Scanf", Func, 0, "func(format string, a ...any) (n int, err error)"},
+ {"Scanln", Func, 0, "func(a ...any) (n int, err error)"},
+ {"Scanner", Type, 0, ""},
+ {"Sprint", Func, 0, "func(a ...any) string"},
+ {"Sprintf", Func, 0, "func(format string, a ...any) string"},
+ {"Sprintln", Func, 0, "func(a ...any) string"},
+ {"Sscan", Func, 0, "func(str string, a ...any) (n int, err error)"},
+ {"Sscanf", Func, 0, "func(str string, format string, a ...any) (n int, err error)"},
+ {"Sscanln", Func, 0, "func(str string, a ...any) (n int, err error)"},
+ {"State", Type, 0, ""},
+ {"Stringer", Type, 0, ""},
+ },
+ "go/ast": {
+ {"(*ArrayType).End", Method, 0, ""},
+ {"(*ArrayType).Pos", Method, 0, ""},
+ {"(*AssignStmt).End", Method, 0, ""},
+ {"(*AssignStmt).Pos", Method, 0, ""},
+ {"(*BadDecl).End", Method, 0, ""},
+ {"(*BadDecl).Pos", Method, 0, ""},
+ {"(*BadExpr).End", Method, 0, ""},
+ {"(*BadExpr).Pos", Method, 0, ""},
+ {"(*BadStmt).End", Method, 0, ""},
+ {"(*BadStmt).Pos", Method, 0, ""},
+ {"(*BasicLit).End", Method, 0, ""},
+ {"(*BasicLit).Pos", Method, 0, ""},
+ {"(*BinaryExpr).End", Method, 0, ""},
+ {"(*BinaryExpr).Pos", Method, 0, ""},
+ {"(*BlockStmt).End", Method, 0, ""},
+ {"(*BlockStmt).Pos", Method, 0, ""},
+ {"(*BranchStmt).End", Method, 0, ""},
+ {"(*BranchStmt).Pos", Method, 0, ""},
+ {"(*CallExpr).End", Method, 0, ""},
+ {"(*CallExpr).Pos", Method, 0, ""},
+ {"(*CaseClause).End", Method, 0, ""},
+ {"(*CaseClause).Pos", Method, 0, ""},
+ {"(*ChanType).End", Method, 0, ""},
+ {"(*ChanType).Pos", Method, 0, ""},
+ {"(*CommClause).End", Method, 0, ""},
+ {"(*CommClause).Pos", Method, 0, ""},
+ {"(*Comment).End", Method, 0, ""},
+ {"(*Comment).Pos", Method, 0, ""},
+ {"(*CommentGroup).End", Method, 0, ""},
+ {"(*CommentGroup).Pos", Method, 0, ""},
+ {"(*CommentGroup).Text", Method, 0, ""},
+ {"(*CompositeLit).End", Method, 0, ""},
+ {"(*CompositeLit).Pos", Method, 0, ""},
+ {"(*DeclStmt).End", Method, 0, ""},
+ {"(*DeclStmt).Pos", Method, 0, ""},
+ {"(*DeferStmt).End", Method, 0, ""},
+ {"(*DeferStmt).Pos", Method, 0, ""},
+ {"(*Ellipsis).End", Method, 0, ""},
+ {"(*Ellipsis).Pos", Method, 0, ""},
+ {"(*EmptyStmt).End", Method, 0, ""},
+ {"(*EmptyStmt).Pos", Method, 0, ""},
+ {"(*ExprStmt).End", Method, 0, ""},
+ {"(*ExprStmt).Pos", Method, 0, ""},
+ {"(*Field).End", Method, 0, ""},
+ {"(*Field).Pos", Method, 0, ""},
+ {"(*FieldList).End", Method, 0, ""},
+ {"(*FieldList).NumFields", Method, 0, ""},
+ {"(*FieldList).Pos", Method, 0, ""},
+ {"(*File).End", Method, 0, ""},
+ {"(*File).Pos", Method, 0, ""},
+ {"(*ForStmt).End", Method, 0, ""},
+ {"(*ForStmt).Pos", Method, 0, ""},
+ {"(*FuncDecl).End", Method, 0, ""},
+ {"(*FuncDecl).Pos", Method, 0, ""},
+ {"(*FuncLit).End", Method, 0, ""},
+ {"(*FuncLit).Pos", Method, 0, ""},
+ {"(*FuncType).End", Method, 0, ""},
+ {"(*FuncType).Pos", Method, 0, ""},
+ {"(*GenDecl).End", Method, 0, ""},
+ {"(*GenDecl).Pos", Method, 0, ""},
+ {"(*GoStmt).End", Method, 0, ""},
+ {"(*GoStmt).Pos", Method, 0, ""},
+ {"(*Ident).End", Method, 0, ""},
+ {"(*Ident).IsExported", Method, 0, ""},
+ {"(*Ident).Pos", Method, 0, ""},
+ {"(*Ident).String", Method, 0, ""},
+ {"(*IfStmt).End", Method, 0, ""},
+ {"(*IfStmt).Pos", Method, 0, ""},
+ {"(*ImportSpec).End", Method, 0, ""},
+ {"(*ImportSpec).Pos", Method, 0, ""},
+ {"(*IncDecStmt).End", Method, 0, ""},
+ {"(*IncDecStmt).Pos", Method, 0, ""},
+ {"(*IndexExpr).End", Method, 0, ""},
+ {"(*IndexExpr).Pos", Method, 0, ""},
+ {"(*IndexListExpr).End", Method, 18, ""},
+ {"(*IndexListExpr).Pos", Method, 18, ""},
+ {"(*InterfaceType).End", Method, 0, ""},
+ {"(*InterfaceType).Pos", Method, 0, ""},
+ {"(*KeyValueExpr).End", Method, 0, ""},
+ {"(*KeyValueExpr).Pos", Method, 0, ""},
+ {"(*LabeledStmt).End", Method, 0, ""},
+ {"(*LabeledStmt).Pos", Method, 0, ""},
+ {"(*MapType).End", Method, 0, ""},
+ {"(*MapType).Pos", Method, 0, ""},
+ {"(*Object).Pos", Method, 0, ""},
+ {"(*Package).End", Method, 0, ""},
+ {"(*Package).Pos", Method, 0, ""},
+ {"(*ParenExpr).End", Method, 0, ""},
+ {"(*ParenExpr).Pos", Method, 0, ""},
+ {"(*RangeStmt).End", Method, 0, ""},
+ {"(*RangeStmt).Pos", Method, 0, ""},
+ {"(*ReturnStmt).End", Method, 0, ""},
+ {"(*ReturnStmt).Pos", Method, 0, ""},
+ {"(*Scope).Insert", Method, 0, ""},
+ {"(*Scope).Lookup", Method, 0, ""},
+ {"(*Scope).String", Method, 0, ""},
+ {"(*SelectStmt).End", Method, 0, ""},
+ {"(*SelectStmt).Pos", Method, 0, ""},
+ {"(*SelectorExpr).End", Method, 0, ""},
+ {"(*SelectorExpr).Pos", Method, 0, ""},
+ {"(*SendStmt).End", Method, 0, ""},
+ {"(*SendStmt).Pos", Method, 0, ""},
+ {"(*SliceExpr).End", Method, 0, ""},
+ {"(*SliceExpr).Pos", Method, 0, ""},
+ {"(*StarExpr).End", Method, 0, ""},
+ {"(*StarExpr).Pos", Method, 0, ""},
+ {"(*StructType).End", Method, 0, ""},
+ {"(*StructType).Pos", Method, 0, ""},
+ {"(*SwitchStmt).End", Method, 0, ""},
+ {"(*SwitchStmt).Pos", Method, 0, ""},
+ {"(*TypeAssertExpr).End", Method, 0, ""},
+ {"(*TypeAssertExpr).Pos", Method, 0, ""},
+ {"(*TypeSpec).End", Method, 0, ""},
+ {"(*TypeSpec).Pos", Method, 0, ""},
+ {"(*TypeSwitchStmt).End", Method, 0, ""},
+ {"(*TypeSwitchStmt).Pos", Method, 0, ""},
+ {"(*UnaryExpr).End", Method, 0, ""},
+ {"(*UnaryExpr).Pos", Method, 0, ""},
+ {"(*ValueSpec).End", Method, 0, ""},
+ {"(*ValueSpec).Pos", Method, 0, ""},
+ {"(CommentMap).Comments", Method, 1, ""},
+ {"(CommentMap).Filter", Method, 1, ""},
+ {"(CommentMap).String", Method, 1, ""},
+ {"(CommentMap).Update", Method, 1, ""},
+ {"(ObjKind).String", Method, 0, ""},
+ {"ArrayType", Type, 0, ""},
+ {"ArrayType.Elt", Field, 0, ""},
+ {"ArrayType.Lbrack", Field, 0, ""},
+ {"ArrayType.Len", Field, 0, ""},
+ {"AssignStmt", Type, 0, ""},
+ {"AssignStmt.Lhs", Field, 0, ""},
+ {"AssignStmt.Rhs", Field, 0, ""},
+ {"AssignStmt.Tok", Field, 0, ""},
+ {"AssignStmt.TokPos", Field, 0, ""},
+ {"Bad", Const, 0, ""},
+ {"BadDecl", Type, 0, ""},
+ {"BadDecl.From", Field, 0, ""},
+ {"BadDecl.To", Field, 0, ""},
+ {"BadExpr", Type, 0, ""},
+ {"BadExpr.From", Field, 0, ""},
+ {"BadExpr.To", Field, 0, ""},
+ {"BadStmt", Type, 0, ""},
+ {"BadStmt.From", Field, 0, ""},
+ {"BadStmt.To", Field, 0, ""},
+ {"BasicLit", Type, 0, ""},
+ {"BasicLit.Kind", Field, 0, ""},
+ {"BasicLit.Value", Field, 0, ""},
+ {"BasicLit.ValuePos", Field, 0, ""},
+ {"BinaryExpr", Type, 0, ""},
+ {"BinaryExpr.Op", Field, 0, ""},
+ {"BinaryExpr.OpPos", Field, 0, ""},
+ {"BinaryExpr.X", Field, 0, ""},
+ {"BinaryExpr.Y", Field, 0, ""},
+ {"BlockStmt", Type, 0, ""},
+ {"BlockStmt.Lbrace", Field, 0, ""},
+ {"BlockStmt.List", Field, 0, ""},
+ {"BlockStmt.Rbrace", Field, 0, ""},
+ {"BranchStmt", Type, 0, ""},
+ {"BranchStmt.Label", Field, 0, ""},
+ {"BranchStmt.Tok", Field, 0, ""},
+ {"BranchStmt.TokPos", Field, 0, ""},
+ {"CallExpr", Type, 0, ""},
+ {"CallExpr.Args", Field, 0, ""},
+ {"CallExpr.Ellipsis", Field, 0, ""},
+ {"CallExpr.Fun", Field, 0, ""},
+ {"CallExpr.Lparen", Field, 0, ""},
+ {"CallExpr.Rparen", Field, 0, ""},
+ {"CaseClause", Type, 0, ""},
+ {"CaseClause.Body", Field, 0, ""},
+ {"CaseClause.Case", Field, 0, ""},
+ {"CaseClause.Colon", Field, 0, ""},
+ {"CaseClause.List", Field, 0, ""},
+ {"ChanDir", Type, 0, ""},
+ {"ChanType", Type, 0, ""},
+ {"ChanType.Arrow", Field, 1, ""},
+ {"ChanType.Begin", Field, 0, ""},
+ {"ChanType.Dir", Field, 0, ""},
+ {"ChanType.Value", Field, 0, ""},
+ {"CommClause", Type, 0, ""},
+ {"CommClause.Body", Field, 0, ""},
+ {"CommClause.Case", Field, 0, ""},
+ {"CommClause.Colon", Field, 0, ""},
+ {"CommClause.Comm", Field, 0, ""},
+ {"Comment", Type, 0, ""},
+ {"Comment.Slash", Field, 0, ""},
+ {"Comment.Text", Field, 0, ""},
+ {"CommentGroup", Type, 0, ""},
+ {"CommentGroup.List", Field, 0, ""},
+ {"CommentMap", Type, 1, ""},
+ {"CompositeLit", Type, 0, ""},
+ {"CompositeLit.Elts", Field, 0, ""},
+ {"CompositeLit.Incomplete", Field, 11, ""},
+ {"CompositeLit.Lbrace", Field, 0, ""},
+ {"CompositeLit.Rbrace", Field, 0, ""},
+ {"CompositeLit.Type", Field, 0, ""},
+ {"Con", Const, 0, ""},
+ {"Decl", Type, 0, ""},
+ {"DeclStmt", Type, 0, ""},
+ {"DeclStmt.Decl", Field, 0, ""},
+ {"DeferStmt", Type, 0, ""},
+ {"DeferStmt.Call", Field, 0, ""},
+ {"DeferStmt.Defer", Field, 0, ""},
+ {"Ellipsis", Type, 0, ""},
+ {"Ellipsis.Ellipsis", Field, 0, ""},
+ {"Ellipsis.Elt", Field, 0, ""},
+ {"EmptyStmt", Type, 0, ""},
+ {"EmptyStmt.Implicit", Field, 5, ""},
+ {"EmptyStmt.Semicolon", Field, 0, ""},
+ {"Expr", Type, 0, ""},
+ {"ExprStmt", Type, 0, ""},
+ {"ExprStmt.X", Field, 0, ""},
+ {"Field", Type, 0, ""},
+ {"Field.Comment", Field, 0, ""},
+ {"Field.Doc", Field, 0, ""},
+ {"Field.Names", Field, 0, ""},
+ {"Field.Tag", Field, 0, ""},
+ {"Field.Type", Field, 0, ""},
+ {"FieldFilter", Type, 0, ""},
+ {"FieldList", Type, 0, ""},
+ {"FieldList.Closing", Field, 0, ""},
+ {"FieldList.List", Field, 0, ""},
+ {"FieldList.Opening", Field, 0, ""},
+ {"File", Type, 0, ""},
+ {"File.Comments", Field, 0, ""},
+ {"File.Decls", Field, 0, ""},
+ {"File.Doc", Field, 0, ""},
+ {"File.FileEnd", Field, 20, ""},
+ {"File.FileStart", Field, 20, ""},
+ {"File.GoVersion", Field, 21, ""},
+ {"File.Imports", Field, 0, ""},
+ {"File.Name", Field, 0, ""},
+ {"File.Package", Field, 0, ""},
+ {"File.Scope", Field, 0, ""},
+ {"File.Unresolved", Field, 0, ""},
+ {"FileExports", Func, 0, "func(src *File) bool"},
+ {"Filter", Type, 0, ""},
+ {"FilterDecl", Func, 0, "func(decl Decl, f Filter) bool"},
+ {"FilterFile", Func, 0, "func(src *File, f Filter) bool"},
+ {"FilterFuncDuplicates", Const, 0, ""},
+ {"FilterImportDuplicates", Const, 0, ""},
+ {"FilterPackage", Func, 0, "func(pkg *Package, f Filter) bool"},
+ {"FilterUnassociatedComments", Const, 0, ""},
+ {"ForStmt", Type, 0, ""},
+ {"ForStmt.Body", Field, 0, ""},
+ {"ForStmt.Cond", Field, 0, ""},
+ {"ForStmt.For", Field, 0, ""},
+ {"ForStmt.Init", Field, 0, ""},
+ {"ForStmt.Post", Field, 0, ""},
+ {"Fprint", Func, 0, "func(w io.Writer, fset *token.FileSet, x any, f FieldFilter) error"},
+ {"Fun", Const, 0, ""},
+ {"FuncDecl", Type, 0, ""},
+ {"FuncDecl.Body", Field, 0, ""},
+ {"FuncDecl.Doc", Field, 0, ""},
+ {"FuncDecl.Name", Field, 0, ""},
+ {"FuncDecl.Recv", Field, 0, ""},
+ {"FuncDecl.Type", Field, 0, ""},
+ {"FuncLit", Type, 0, ""},
+ {"FuncLit.Body", Field, 0, ""},
+ {"FuncLit.Type", Field, 0, ""},
+ {"FuncType", Type, 0, ""},
+ {"FuncType.Func", Field, 0, ""},
+ {"FuncType.Params", Field, 0, ""},
+ {"FuncType.Results", Field, 0, ""},
+ {"FuncType.TypeParams", Field, 18, ""},
+ {"GenDecl", Type, 0, ""},
+ {"GenDecl.Doc", Field, 0, ""},
+ {"GenDecl.Lparen", Field, 0, ""},
+ {"GenDecl.Rparen", Field, 0, ""},
+ {"GenDecl.Specs", Field, 0, ""},
+ {"GenDecl.Tok", Field, 0, ""},
+ {"GenDecl.TokPos", Field, 0, ""},
+ {"GoStmt", Type, 0, ""},
+ {"GoStmt.Call", Field, 0, ""},
+ {"GoStmt.Go", Field, 0, ""},
+ {"Ident", Type, 0, ""},
+ {"Ident.Name", Field, 0, ""},
+ {"Ident.NamePos", Field, 0, ""},
+ {"Ident.Obj", Field, 0, ""},
+ {"IfStmt", Type, 0, ""},
+ {"IfStmt.Body", Field, 0, ""},
+ {"IfStmt.Cond", Field, 0, ""},
+ {"IfStmt.Else", Field, 0, ""},
+ {"IfStmt.If", Field, 0, ""},
+ {"IfStmt.Init", Field, 0, ""},
+ {"ImportSpec", Type, 0, ""},
+ {"ImportSpec.Comment", Field, 0, ""},
+ {"ImportSpec.Doc", Field, 0, ""},
+ {"ImportSpec.EndPos", Field, 0, ""},
+ {"ImportSpec.Name", Field, 0, ""},
+ {"ImportSpec.Path", Field, 0, ""},
+ {"Importer", Type, 0, ""},
+ {"IncDecStmt", Type, 0, ""},
+ {"IncDecStmt.Tok", Field, 0, ""},
+ {"IncDecStmt.TokPos", Field, 0, ""},
+ {"IncDecStmt.X", Field, 0, ""},
+ {"IndexExpr", Type, 0, ""},
+ {"IndexExpr.Index", Field, 0, ""},
+ {"IndexExpr.Lbrack", Field, 0, ""},
+ {"IndexExpr.Rbrack", Field, 0, ""},
+ {"IndexExpr.X", Field, 0, ""},
+ {"IndexListExpr", Type, 18, ""},
+ {"IndexListExpr.Indices", Field, 18, ""},
+ {"IndexListExpr.Lbrack", Field, 18, ""},
+ {"IndexListExpr.Rbrack", Field, 18, ""},
+ {"IndexListExpr.X", Field, 18, ""},
+ {"Inspect", Func, 0, "func(node Node, f func(Node) bool)"},
+ {"InterfaceType", Type, 0, ""},
+ {"InterfaceType.Incomplete", Field, 0, ""},
+ {"InterfaceType.Interface", Field, 0, ""},
+ {"InterfaceType.Methods", Field, 0, ""},
+ {"IsExported", Func, 0, "func(name string) bool"},
+ {"IsGenerated", Func, 21, "func(file *File) bool"},
+ {"KeyValueExpr", Type, 0, ""},
+ {"KeyValueExpr.Colon", Field, 0, ""},
+ {"KeyValueExpr.Key", Field, 0, ""},
+ {"KeyValueExpr.Value", Field, 0, ""},
+ {"LabeledStmt", Type, 0, ""},
+ {"LabeledStmt.Colon", Field, 0, ""},
+ {"LabeledStmt.Label", Field, 0, ""},
+ {"LabeledStmt.Stmt", Field, 0, ""},
+ {"Lbl", Const, 0, ""},
+ {"MapType", Type, 0, ""},
+ {"MapType.Key", Field, 0, ""},
+ {"MapType.Map", Field, 0, ""},
+ {"MapType.Value", Field, 0, ""},
+ {"MergeMode", Type, 0, ""},
+ {"MergePackageFiles", Func, 0, "func(pkg *Package, mode MergeMode) *File"},
+ {"NewCommentMap", Func, 1, "func(fset *token.FileSet, node Node, comments []*CommentGroup) CommentMap"},
+ {"NewIdent", Func, 0, "func(name string) *Ident"},
+ {"NewObj", Func, 0, "func(kind ObjKind, name string) *Object"},
+ {"NewPackage", Func, 0, "func(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, error)"},
+ {"NewScope", Func, 0, "func(outer *Scope) *Scope"},
+ {"Node", Type, 0, ""},
+ {"NotNilFilter", Func, 0, "func(_ string, v reflect.Value) bool"},
+ {"ObjKind", Type, 0, ""},
+ {"Object", Type, 0, ""},
+ {"Object.Data", Field, 0, ""},
+ {"Object.Decl", Field, 0, ""},
+ {"Object.Kind", Field, 0, ""},
+ {"Object.Name", Field, 0, ""},
+ {"Object.Type", Field, 0, ""},
+ {"Package", Type, 0, ""},
+ {"Package.Files", Field, 0, ""},
+ {"Package.Imports", Field, 0, ""},
+ {"Package.Name", Field, 0, ""},
+ {"Package.Scope", Field, 0, ""},
+ {"PackageExports", Func, 0, "func(pkg *Package) bool"},
+ {"ParenExpr", Type, 0, ""},
+ {"ParenExpr.Lparen", Field, 0, ""},
+ {"ParenExpr.Rparen", Field, 0, ""},
+ {"ParenExpr.X", Field, 0, ""},
+ {"Pkg", Const, 0, ""},
+ {"Preorder", Func, 23, "func(root Node) iter.Seq[Node]"},
+ {"PreorderStack", Func, 25, "func(root Node, stack []Node, f func(n Node, stack []Node) bool)"},
+ {"Print", Func, 0, "func(fset *token.FileSet, x any) error"},
+ {"RECV", Const, 0, ""},
+ {"RangeStmt", Type, 0, ""},
+ {"RangeStmt.Body", Field, 0, ""},
+ {"RangeStmt.For", Field, 0, ""},
+ {"RangeStmt.Key", Field, 0, ""},
+ {"RangeStmt.Range", Field, 20, ""},
+ {"RangeStmt.Tok", Field, 0, ""},
+ {"RangeStmt.TokPos", Field, 0, ""},
+ {"RangeStmt.Value", Field, 0, ""},
+ {"RangeStmt.X", Field, 0, ""},
+ {"ReturnStmt", Type, 0, ""},
+ {"ReturnStmt.Results", Field, 0, ""},
+ {"ReturnStmt.Return", Field, 0, ""},
+ {"SEND", Const, 0, ""},
+ {"Scope", Type, 0, ""},
+ {"Scope.Objects", Field, 0, ""},
+ {"Scope.Outer", Field, 0, ""},
+ {"SelectStmt", Type, 0, ""},
+ {"SelectStmt.Body", Field, 0, ""},
+ {"SelectStmt.Select", Field, 0, ""},
+ {"SelectorExpr", Type, 0, ""},
+ {"SelectorExpr.Sel", Field, 0, ""},
+ {"SelectorExpr.X", Field, 0, ""},
+ {"SendStmt", Type, 0, ""},
+ {"SendStmt.Arrow", Field, 0, ""},
+ {"SendStmt.Chan", Field, 0, ""},
+ {"SendStmt.Value", Field, 0, ""},
+ {"SliceExpr", Type, 0, ""},
+ {"SliceExpr.High", Field, 0, ""},
+ {"SliceExpr.Lbrack", Field, 0, ""},
+ {"SliceExpr.Low", Field, 0, ""},
+ {"SliceExpr.Max", Field, 2, ""},
+ {"SliceExpr.Rbrack", Field, 0, ""},
+ {"SliceExpr.Slice3", Field, 2, ""},
+ {"SliceExpr.X", Field, 0, ""},
+ {"SortImports", Func, 0, "func(fset *token.FileSet, f *File)"},
+ {"Spec", Type, 0, ""},
+ {"StarExpr", Type, 0, ""},
+ {"StarExpr.Star", Field, 0, ""},
+ {"StarExpr.X", Field, 0, ""},
+ {"Stmt", Type, 0, ""},
+ {"StructType", Type, 0, ""},
+ {"StructType.Fields", Field, 0, ""},
+ {"StructType.Incomplete", Field, 0, ""},
+ {"StructType.Struct", Field, 0, ""},
+ {"SwitchStmt", Type, 0, ""},
+ {"SwitchStmt.Body", Field, 0, ""},
+ {"SwitchStmt.Init", Field, 0, ""},
+ {"SwitchStmt.Switch", Field, 0, ""},
+ {"SwitchStmt.Tag", Field, 0, ""},
+ {"Typ", Const, 0, ""},
+ {"TypeAssertExpr", Type, 0, ""},
+ {"TypeAssertExpr.Lparen", Field, 2, ""},
+ {"TypeAssertExpr.Rparen", Field, 2, ""},
+ {"TypeAssertExpr.Type", Field, 0, ""},
+ {"TypeAssertExpr.X", Field, 0, ""},
+ {"TypeSpec", Type, 0, ""},
+ {"TypeSpec.Assign", Field, 9, ""},
+ {"TypeSpec.Comment", Field, 0, ""},
+ {"TypeSpec.Doc", Field, 0, ""},
+ {"TypeSpec.Name", Field, 0, ""},
+ {"TypeSpec.Type", Field, 0, ""},
+ {"TypeSpec.TypeParams", Field, 18, ""},
+ {"TypeSwitchStmt", Type, 0, ""},
+ {"TypeSwitchStmt.Assign", Field, 0, ""},
+ {"TypeSwitchStmt.Body", Field, 0, ""},
+ {"TypeSwitchStmt.Init", Field, 0, ""},
+ {"TypeSwitchStmt.Switch", Field, 0, ""},
+ {"UnaryExpr", Type, 0, ""},
+ {"UnaryExpr.Op", Field, 0, ""},
+ {"UnaryExpr.OpPos", Field, 0, ""},
+ {"UnaryExpr.X", Field, 0, ""},
+ {"Unparen", Func, 22, "func(e Expr) Expr"},
+ {"ValueSpec", Type, 0, ""},
+ {"ValueSpec.Comment", Field, 0, ""},
+ {"ValueSpec.Doc", Field, 0, ""},
+ {"ValueSpec.Names", Field, 0, ""},
+ {"ValueSpec.Type", Field, 0, ""},
+ {"ValueSpec.Values", Field, 0, ""},
+ {"Var", Const, 0, ""},
+ {"Visitor", Type, 0, ""},
+ {"Walk", Func, 0, "func(v Visitor, node Node)"},
+ },
+ "go/build": {
+ {"(*Context).Import", Method, 0, ""},
+ {"(*Context).ImportDir", Method, 0, ""},
+ {"(*Context).MatchFile", Method, 2, ""},
+ {"(*Context).SrcDirs", Method, 0, ""},
+ {"(*MultiplePackageError).Error", Method, 4, ""},
+ {"(*NoGoError).Error", Method, 0, ""},
+ {"(*Package).IsCommand", Method, 0, ""},
+ {"AllowBinary", Const, 0, ""},
+ {"ArchChar", Func, 0, "func(goarch string) (string, error)"},
+ {"Context", Type, 0, ""},
+ {"Context.BuildTags", Field, 0, ""},
+ {"Context.CgoEnabled", Field, 0, ""},
+ {"Context.Compiler", Field, 0, ""},
+ {"Context.Dir", Field, 14, ""},
+ {"Context.GOARCH", Field, 0, ""},
+ {"Context.GOOS", Field, 0, ""},
+ {"Context.GOPATH", Field, 0, ""},
+ {"Context.GOROOT", Field, 0, ""},
+ {"Context.HasSubdir", Field, 0, ""},
+ {"Context.InstallSuffix", Field, 1, ""},
+ {"Context.IsAbsPath", Field, 0, ""},
+ {"Context.IsDir", Field, 0, ""},
+ {"Context.JoinPath", Field, 0, ""},
+ {"Context.OpenFile", Field, 0, ""},
+ {"Context.ReadDir", Field, 0, ""},
+ {"Context.ReleaseTags", Field, 1, ""},
+ {"Context.SplitPathList", Field, 0, ""},
+ {"Context.ToolTags", Field, 17, ""},
+ {"Context.UseAllFiles", Field, 0, ""},
+ {"Default", Var, 0, ""},
+ {"Directive", Type, 21, ""},
+ {"Directive.Pos", Field, 21, ""},
+ {"Directive.Text", Field, 21, ""},
+ {"FindOnly", Const, 0, ""},
+ {"IgnoreVendor", Const, 6, ""},
+ {"Import", Func, 0, "func(path string, srcDir string, mode ImportMode) (*Package, error)"},
+ {"ImportComment", Const, 4, ""},
+ {"ImportDir", Func, 0, "func(dir string, mode ImportMode) (*Package, error)"},
+ {"ImportMode", Type, 0, ""},
+ {"IsLocalImport", Func, 0, "func(path string) bool"},
+ {"MultiplePackageError", Type, 4, ""},
+ {"MultiplePackageError.Dir", Field, 4, ""},
+ {"MultiplePackageError.Files", Field, 4, ""},
+ {"MultiplePackageError.Packages", Field, 4, ""},
+ {"NoGoError", Type, 0, ""},
+ {"NoGoError.Dir", Field, 0, ""},
+ {"Package", Type, 0, ""},
+ {"Package.AllTags", Field, 2, ""},
+ {"Package.BinDir", Field, 0, ""},
+ {"Package.BinaryOnly", Field, 7, ""},
+ {"Package.CFiles", Field, 0, ""},
+ {"Package.CXXFiles", Field, 2, ""},
+ {"Package.CgoCFLAGS", Field, 0, ""},
+ {"Package.CgoCPPFLAGS", Field, 2, ""},
+ {"Package.CgoCXXFLAGS", Field, 2, ""},
+ {"Package.CgoFFLAGS", Field, 7, ""},
+ {"Package.CgoFiles", Field, 0, ""},
+ {"Package.CgoLDFLAGS", Field, 0, ""},
+ {"Package.CgoPkgConfig", Field, 0, ""},
+ {"Package.ConflictDir", Field, 2, ""},
+ {"Package.Dir", Field, 0, ""},
+ {"Package.Directives", Field, 21, ""},
+ {"Package.Doc", Field, 0, ""},
+ {"Package.EmbedPatternPos", Field, 16, ""},
+ {"Package.EmbedPatterns", Field, 16, ""},
+ {"Package.FFiles", Field, 7, ""},
+ {"Package.GoFiles", Field, 0, ""},
+ {"Package.Goroot", Field, 0, ""},
+ {"Package.HFiles", Field, 0, ""},
+ {"Package.IgnoredGoFiles", Field, 1, ""},
+ {"Package.IgnoredOtherFiles", Field, 16, ""},
+ {"Package.ImportComment", Field, 4, ""},
+ {"Package.ImportPath", Field, 0, ""},
+ {"Package.ImportPos", Field, 0, ""},
+ {"Package.Imports", Field, 0, ""},
+ {"Package.InvalidGoFiles", Field, 6, ""},
+ {"Package.MFiles", Field, 3, ""},
+ {"Package.Name", Field, 0, ""},
+ {"Package.PkgObj", Field, 0, ""},
+ {"Package.PkgRoot", Field, 0, ""},
+ {"Package.PkgTargetRoot", Field, 5, ""},
+ {"Package.Root", Field, 0, ""},
+ {"Package.SFiles", Field, 0, ""},
+ {"Package.SrcRoot", Field, 0, ""},
+ {"Package.SwigCXXFiles", Field, 1, ""},
+ {"Package.SwigFiles", Field, 1, ""},
+ {"Package.SysoFiles", Field, 0, ""},
+ {"Package.TestDirectives", Field, 21, ""},
+ {"Package.TestEmbedPatternPos", Field, 16, ""},
+ {"Package.TestEmbedPatterns", Field, 16, ""},
+ {"Package.TestGoFiles", Field, 0, ""},
+ {"Package.TestImportPos", Field, 0, ""},
+ {"Package.TestImports", Field, 0, ""},
+ {"Package.XTestDirectives", Field, 21, ""},
+ {"Package.XTestEmbedPatternPos", Field, 16, ""},
+ {"Package.XTestEmbedPatterns", Field, 16, ""},
+ {"Package.XTestGoFiles", Field, 0, ""},
+ {"Package.XTestImportPos", Field, 0, ""},
+ {"Package.XTestImports", Field, 0, ""},
+ {"ToolDir", Var, 0, ""},
+ },
+ "go/build/constraint": {
+ {"(*AndExpr).Eval", Method, 16, ""},
+ {"(*AndExpr).String", Method, 16, ""},
+ {"(*NotExpr).Eval", Method, 16, ""},
+ {"(*NotExpr).String", Method, 16, ""},
+ {"(*OrExpr).Eval", Method, 16, ""},
+ {"(*OrExpr).String", Method, 16, ""},
+ {"(*SyntaxError).Error", Method, 16, ""},
+ {"(*TagExpr).Eval", Method, 16, ""},
+ {"(*TagExpr).String", Method, 16, ""},
+ {"AndExpr", Type, 16, ""},
+ {"AndExpr.X", Field, 16, ""},
+ {"AndExpr.Y", Field, 16, ""},
+ {"Expr", Type, 16, ""},
+ {"GoVersion", Func, 21, "func(x Expr) string"},
+ {"IsGoBuild", Func, 16, "func(line string) bool"},
+ {"IsPlusBuild", Func, 16, "func(line string) bool"},
+ {"NotExpr", Type, 16, ""},
+ {"NotExpr.X", Field, 16, ""},
+ {"OrExpr", Type, 16, ""},
+ {"OrExpr.X", Field, 16, ""},
+ {"OrExpr.Y", Field, 16, ""},
+ {"Parse", Func, 16, "func(line string) (Expr, error)"},
+ {"PlusBuildLines", Func, 16, "func(x Expr) ([]string, error)"},
+ {"SyntaxError", Type, 16, ""},
+ {"SyntaxError.Err", Field, 16, ""},
+ {"SyntaxError.Offset", Field, 16, ""},
+ {"TagExpr", Type, 16, ""},
+ {"TagExpr.Tag", Field, 16, ""},
+ },
+ "go/constant": {
+ {"(Kind).String", Method, 18, ""},
+ {"BinaryOp", Func, 5, "func(x_ Value, op token.Token, y_ Value) Value"},
+ {"BitLen", Func, 5, "func(x Value) int"},
+ {"Bool", Const, 5, ""},
+ {"BoolVal", Func, 5, "func(x Value) bool"},
+ {"Bytes", Func, 5, "func(x Value) []byte"},
+ {"Compare", Func, 5, "func(x_ Value, op token.Token, y_ Value) bool"},
+ {"Complex", Const, 5, ""},
+ {"Denom", Func, 5, "func(x Value) Value"},
+ {"Float", Const, 5, ""},
+ {"Float32Val", Func, 5, "func(x Value) (float32, bool)"},
+ {"Float64Val", Func, 5, "func(x Value) (float64, bool)"},
+ {"Imag", Func, 5, "func(x Value) Value"},
+ {"Int", Const, 5, ""},
+ {"Int64Val", Func, 5, "func(x Value) (int64, bool)"},
+ {"Kind", Type, 5, ""},
+ {"Make", Func, 13, "func(x any) Value"},
+ {"MakeBool", Func, 5, "func(b bool) Value"},
+ {"MakeFloat64", Func, 5, "func(x float64) Value"},
+ {"MakeFromBytes", Func, 5, "func(bytes []byte) Value"},
+ {"MakeFromLiteral", Func, 5, "func(lit string, tok token.Token, zero uint) Value"},
+ {"MakeImag", Func, 5, "func(x Value) Value"},
+ {"MakeInt64", Func, 5, "func(x int64) Value"},
+ {"MakeString", Func, 5, "func(s string) Value"},
+ {"MakeUint64", Func, 5, "func(x uint64) Value"},
+ {"MakeUnknown", Func, 5, "func() Value"},
+ {"Num", Func, 5, "func(x Value) Value"},
+ {"Real", Func, 5, "func(x Value) Value"},
+ {"Shift", Func, 5, "func(x Value, op token.Token, s uint) Value"},
+ {"Sign", Func, 5, "func(x Value) int"},
+ {"String", Const, 5, ""},
+ {"StringVal", Func, 5, "func(x Value) string"},
+ {"ToComplex", Func, 6, "func(x Value) Value"},
+ {"ToFloat", Func, 6, "func(x Value) Value"},
+ {"ToInt", Func, 6, "func(x Value) Value"},
+ {"Uint64Val", Func, 5, "func(x Value) (uint64, bool)"},
+ {"UnaryOp", Func, 5, "func(op token.Token, y Value, prec uint) Value"},
+ {"Unknown", Const, 5, ""},
+ {"Val", Func, 13, "func(x Value) any"},
+ {"Value", Type, 5, ""},
+ },
+ "go/doc": {
+ {"(*Package).Filter", Method, 0, ""},
+ {"(*Package).HTML", Method, 19, ""},
+ {"(*Package).Markdown", Method, 19, ""},
+ {"(*Package).Parser", Method, 19, ""},
+ {"(*Package).Printer", Method, 19, ""},
+ {"(*Package).Synopsis", Method, 19, ""},
+ {"(*Package).Text", Method, 19, ""},
+ {"AllDecls", Const, 0, ""},
+ {"AllMethods", Const, 0, ""},
+ {"Example", Type, 0, ""},
+ {"Example.Code", Field, 0, ""},
+ {"Example.Comments", Field, 0, ""},
+ {"Example.Doc", Field, 0, ""},
+ {"Example.EmptyOutput", Field, 1, ""},
+ {"Example.Name", Field, 0, ""},
+ {"Example.Order", Field, 1, ""},
+ {"Example.Output", Field, 0, ""},
+ {"Example.Play", Field, 1, ""},
+ {"Example.Suffix", Field, 14, ""},
+ {"Example.Unordered", Field, 7, ""},
+ {"Examples", Func, 0, "func(testFiles ...*ast.File) []*Example"},
+ {"Filter", Type, 0, ""},
+ {"Func", Type, 0, ""},
+ {"Func.Decl", Field, 0, ""},
+ {"Func.Doc", Field, 0, ""},
+ {"Func.Examples", Field, 14, ""},
+ {"Func.Level", Field, 0, ""},
+ {"Func.Name", Field, 0, ""},
+ {"Func.Orig", Field, 0, ""},
+ {"Func.Recv", Field, 0, ""},
+ {"IllegalPrefixes", Var, 1, ""},
+ {"IsPredeclared", Func, 8, "func(s string) bool"},
+ {"Mode", Type, 0, ""},
+ {"New", Func, 0, "func(pkg *ast.Package, importPath string, mode Mode) *Package"},
+ {"NewFromFiles", Func, 14, "func(fset *token.FileSet, files []*ast.File, importPath string, opts ...any) (*Package, error)"},
+ {"Note", Type, 1, ""},
+ {"Note.Body", Field, 1, ""},
+ {"Note.End", Field, 1, ""},
+ {"Note.Pos", Field, 1, ""},
+ {"Note.UID", Field, 1, ""},
+ {"Package", Type, 0, ""},
+ {"Package.Bugs", Field, 0, ""},
+ {"Package.Consts", Field, 0, ""},
+ {"Package.Doc", Field, 0, ""},
+ {"Package.Examples", Field, 14, ""},
+ {"Package.Filenames", Field, 0, ""},
+ {"Package.Funcs", Field, 0, ""},
+ {"Package.ImportPath", Field, 0, ""},
+ {"Package.Imports", Field, 0, ""},
+ {"Package.Name", Field, 0, ""},
+ {"Package.Notes", Field, 1, ""},
+ {"Package.Types", Field, 0, ""},
+ {"Package.Vars", Field, 0, ""},
+ {"PreserveAST", Const, 12, ""},
+ {"Synopsis", Func, 0, "func(text string) string"},
+ {"ToHTML", Func, 0, "func(w io.Writer, text string, words map[string]string)"},
+ {"ToText", Func, 0, "func(w io.Writer, text string, prefix string, codePrefix string, width int)"},
+ {"Type", Type, 0, ""},
+ {"Type.Consts", Field, 0, ""},
+ {"Type.Decl", Field, 0, ""},
+ {"Type.Doc", Field, 0, ""},
+ {"Type.Examples", Field, 14, ""},
+ {"Type.Funcs", Field, 0, ""},
+ {"Type.Methods", Field, 0, ""},
+ {"Type.Name", Field, 0, ""},
+ {"Type.Vars", Field, 0, ""},
+ {"Value", Type, 0, ""},
+ {"Value.Decl", Field, 0, ""},
+ {"Value.Doc", Field, 0, ""},
+ {"Value.Names", Field, 0, ""},
+ },
+ "go/doc/comment": {
+ {"(*DocLink).DefaultURL", Method, 19, ""},
+ {"(*Heading).DefaultID", Method, 19, ""},
+ {"(*List).BlankBefore", Method, 19, ""},
+ {"(*List).BlankBetween", Method, 19, ""},
+ {"(*Parser).Parse", Method, 19, ""},
+ {"(*Printer).Comment", Method, 19, ""},
+ {"(*Printer).HTML", Method, 19, ""},
+ {"(*Printer).Markdown", Method, 19, ""},
+ {"(*Printer).Text", Method, 19, ""},
+ {"Block", Type, 19, ""},
+ {"Code", Type, 19, ""},
+ {"Code.Text", Field, 19, ""},
+ {"DefaultLookupPackage", Func, 19, "func(name string) (importPath string, ok bool)"},
+ {"Doc", Type, 19, ""},
+ {"Doc.Content", Field, 19, ""},
+ {"Doc.Links", Field, 19, ""},
+ {"DocLink", Type, 19, ""},
+ {"DocLink.ImportPath", Field, 19, ""},
+ {"DocLink.Name", Field, 19, ""},
+ {"DocLink.Recv", Field, 19, ""},
+ {"DocLink.Text", Field, 19, ""},
+ {"Heading", Type, 19, ""},
+ {"Heading.Text", Field, 19, ""},
+ {"Italic", Type, 19, ""},
+ {"Link", Type, 19, ""},
+ {"Link.Auto", Field, 19, ""},
+ {"Link.Text", Field, 19, ""},
+ {"Link.URL", Field, 19, ""},
+ {"LinkDef", Type, 19, ""},
+ {"LinkDef.Text", Field, 19, ""},
+ {"LinkDef.URL", Field, 19, ""},
+ {"LinkDef.Used", Field, 19, ""},
+ {"List", Type, 19, ""},
+ {"List.ForceBlankBefore", Field, 19, ""},
+ {"List.ForceBlankBetween", Field, 19, ""},
+ {"List.Items", Field, 19, ""},
+ {"ListItem", Type, 19, ""},
+ {"ListItem.Content", Field, 19, ""},
+ {"ListItem.Number", Field, 19, ""},
+ {"Paragraph", Type, 19, ""},
+ {"Paragraph.Text", Field, 19, ""},
+ {"Parser", Type, 19, ""},
+ {"Parser.LookupPackage", Field, 19, ""},
+ {"Parser.LookupSym", Field, 19, ""},
+ {"Parser.Words", Field, 19, ""},
+ {"Plain", Type, 19, ""},
+ {"Printer", Type, 19, ""},
+ {"Printer.DocLinkBaseURL", Field, 19, ""},
+ {"Printer.DocLinkURL", Field, 19, ""},
+ {"Printer.HeadingID", Field, 19, ""},
+ {"Printer.HeadingLevel", Field, 19, ""},
+ {"Printer.TextCodePrefix", Field, 19, ""},
+ {"Printer.TextPrefix", Field, 19, ""},
+ {"Printer.TextWidth", Field, 19, ""},
+ {"Text", Type, 19, ""},
+ },
+ "go/format": {
+ {"Node", Func, 1, "func(dst io.Writer, fset *token.FileSet, node any) error"},
+ {"Source", Func, 1, "func(src []byte) ([]byte, error)"},
+ },
+ "go/importer": {
+ {"Default", Func, 5, "func() types.Importer"},
+ {"For", Func, 5, "func(compiler string, lookup Lookup) types.Importer"},
+ {"ForCompiler", Func, 12, "func(fset *token.FileSet, compiler string, lookup Lookup) types.Importer"},
+ {"Lookup", Type, 5, ""},
+ },
+ "go/parser": {
+ {"AllErrors", Const, 1, ""},
+ {"DeclarationErrors", Const, 0, ""},
+ {"ImportsOnly", Const, 0, ""},
+ {"Mode", Type, 0, ""},
+ {"PackageClauseOnly", Const, 0, ""},
+ {"ParseComments", Const, 0, ""},
+ {"ParseDir", Func, 0, "func(fset *token.FileSet, path string, filter func(fs.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error)"},
+ {"ParseExpr", Func, 0, "func(x string) (ast.Expr, error)"},
+ {"ParseExprFrom", Func, 5, "func(fset *token.FileSet, filename string, src any, mode Mode) (expr ast.Expr, err error)"},
+ {"ParseFile", Func, 0, "func(fset *token.FileSet, filename string, src any, mode Mode) (f *ast.File, err error)"},
+ {"SkipObjectResolution", Const, 17, ""},
+ {"SpuriousErrors", Const, 0, ""},
+ {"Trace", Const, 0, ""},
+ },
+ "go/printer": {
+ {"(*Config).Fprint", Method, 0, ""},
+ {"CommentedNode", Type, 0, ""},
+ {"CommentedNode.Comments", Field, 0, ""},
+ {"CommentedNode.Node", Field, 0, ""},
+ {"Config", Type, 0, ""},
+ {"Config.Indent", Field, 1, ""},
+ {"Config.Mode", Field, 0, ""},
+ {"Config.Tabwidth", Field, 0, ""},
+ {"Fprint", Func, 0, "func(output io.Writer, fset *token.FileSet, node any) error"},
+ {"Mode", Type, 0, ""},
+ {"RawFormat", Const, 0, ""},
+ {"SourcePos", Const, 0, ""},
+ {"TabIndent", Const, 0, ""},
+ {"UseSpaces", Const, 0, ""},
+ },
+ "go/scanner": {
+ {"(*ErrorList).Add", Method, 0, ""},
+ {"(*ErrorList).RemoveMultiples", Method, 0, ""},
+ {"(*ErrorList).Reset", Method, 0, ""},
+ {"(*Scanner).Init", Method, 0, ""},
+ {"(*Scanner).Scan", Method, 0, ""},
+ {"(Error).Error", Method, 0, ""},
+ {"(ErrorList).Err", Method, 0, ""},
+ {"(ErrorList).Error", Method, 0, ""},
+ {"(ErrorList).Len", Method, 0, ""},
+ {"(ErrorList).Less", Method, 0, ""},
+ {"(ErrorList).Sort", Method, 0, ""},
+ {"(ErrorList).Swap", Method, 0, ""},
+ {"Error", Type, 0, ""},
+ {"Error.Msg", Field, 0, ""},
+ {"Error.Pos", Field, 0, ""},
+ {"ErrorHandler", Type, 0, ""},
+ {"ErrorList", Type, 0, ""},
+ {"Mode", Type, 0, ""},
+ {"PrintError", Func, 0, "func(w io.Writer, err error)"},
+ {"ScanComments", Const, 0, ""},
+ {"Scanner", Type, 0, ""},
+ {"Scanner.ErrorCount", Field, 0, ""},
+ },
+ "go/token": {
+ {"(*File).AddLine", Method, 0, ""},
+ {"(*File).AddLineColumnInfo", Method, 11, ""},
+ {"(*File).AddLineInfo", Method, 0, ""},
+ {"(*File).Base", Method, 0, ""},
+ {"(*File).Line", Method, 0, ""},
+ {"(*File).LineCount", Method, 0, ""},
+ {"(*File).LineStart", Method, 12, ""},
+ {"(*File).Lines", Method, 21, ""},
+ {"(*File).MergeLine", Method, 2, ""},
+ {"(*File).Name", Method, 0, ""},
+ {"(*File).Offset", Method, 0, ""},
+ {"(*File).Pos", Method, 0, ""},
+ {"(*File).Position", Method, 0, ""},
+ {"(*File).PositionFor", Method, 4, ""},
+ {"(*File).SetLines", Method, 0, ""},
+ {"(*File).SetLinesForContent", Method, 0, ""},
+ {"(*File).Size", Method, 0, ""},
+ {"(*FileSet).AddExistingFiles", Method, 25, ""},
+ {"(*FileSet).AddFile", Method, 0, ""},
+ {"(*FileSet).Base", Method, 0, ""},
+ {"(*FileSet).File", Method, 0, ""},
+ {"(*FileSet).Iterate", Method, 0, ""},
+ {"(*FileSet).Position", Method, 0, ""},
+ {"(*FileSet).PositionFor", Method, 4, ""},
+ {"(*FileSet).Read", Method, 0, ""},
+ {"(*FileSet).RemoveFile", Method, 20, ""},
+ {"(*FileSet).Write", Method, 0, ""},
+ {"(*Position).IsValid", Method, 0, ""},
+ {"(Pos).IsValid", Method, 0, ""},
+ {"(Position).String", Method, 0, ""},
+ {"(Token).IsKeyword", Method, 0, ""},
+ {"(Token).IsLiteral", Method, 0, ""},
+ {"(Token).IsOperator", Method, 0, ""},
+ {"(Token).Precedence", Method, 0, ""},
+ {"(Token).String", Method, 0, ""},
+ {"ADD", Const, 0, ""},
+ {"ADD_ASSIGN", Const, 0, ""},
+ {"AND", Const, 0, ""},
+ {"AND_ASSIGN", Const, 0, ""},
+ {"AND_NOT", Const, 0, ""},
+ {"AND_NOT_ASSIGN", Const, 0, ""},
+ {"ARROW", Const, 0, ""},
+ {"ASSIGN", Const, 0, ""},
+ {"BREAK", Const, 0, ""},
+ {"CASE", Const, 0, ""},
+ {"CHAN", Const, 0, ""},
+ {"CHAR", Const, 0, ""},
+ {"COLON", Const, 0, ""},
+ {"COMMA", Const, 0, ""},
+ {"COMMENT", Const, 0, ""},
+ {"CONST", Const, 0, ""},
+ {"CONTINUE", Const, 0, ""},
+ {"DEC", Const, 0, ""},
+ {"DEFAULT", Const, 0, ""},
+ {"DEFER", Const, 0, ""},
+ {"DEFINE", Const, 0, ""},
+ {"ELLIPSIS", Const, 0, ""},
+ {"ELSE", Const, 0, ""},
+ {"EOF", Const, 0, ""},
+ {"EQL", Const, 0, ""},
+ {"FALLTHROUGH", Const, 0, ""},
+ {"FLOAT", Const, 0, ""},
+ {"FOR", Const, 0, ""},
+ {"FUNC", Const, 0, ""},
+ {"File", Type, 0, ""},
+ {"FileSet", Type, 0, ""},
+ {"GEQ", Const, 0, ""},
+ {"GO", Const, 0, ""},
+ {"GOTO", Const, 0, ""},
+ {"GTR", Const, 0, ""},
+ {"HighestPrec", Const, 0, ""},
+ {"IDENT", Const, 0, ""},
+ {"IF", Const, 0, ""},
+ {"ILLEGAL", Const, 0, ""},
+ {"IMAG", Const, 0, ""},
+ {"IMPORT", Const, 0, ""},
+ {"INC", Const, 0, ""},
+ {"INT", Const, 0, ""},
+ {"INTERFACE", Const, 0, ""},
+ {"IsExported", Func, 13, "func(name string) bool"},
+ {"IsIdentifier", Func, 13, "func(name string) bool"},
+ {"IsKeyword", Func, 13, "func(name string) bool"},
+ {"LAND", Const, 0, ""},
+ {"LBRACE", Const, 0, ""},
+ {"LBRACK", Const, 0, ""},
+ {"LEQ", Const, 0, ""},
+ {"LOR", Const, 0, ""},
+ {"LPAREN", Const, 0, ""},
+ {"LSS", Const, 0, ""},
+ {"Lookup", Func, 0, "func(ident string) Token"},
+ {"LowestPrec", Const, 0, ""},
+ {"MAP", Const, 0, ""},
+ {"MUL", Const, 0, ""},
+ {"MUL_ASSIGN", Const, 0, ""},
+ {"NEQ", Const, 0, ""},
+ {"NOT", Const, 0, ""},
+ {"NewFileSet", Func, 0, "func() *FileSet"},
+ {"NoPos", Const, 0, ""},
+ {"OR", Const, 0, ""},
+ {"OR_ASSIGN", Const, 0, ""},
+ {"PACKAGE", Const, 0, ""},
+ {"PERIOD", Const, 0, ""},
+ {"Pos", Type, 0, ""},
+ {"Position", Type, 0, ""},
+ {"Position.Column", Field, 0, ""},
+ {"Position.Filename", Field, 0, ""},
+ {"Position.Line", Field, 0, ""},
+ {"Position.Offset", Field, 0, ""},
+ {"QUO", Const, 0, ""},
+ {"QUO_ASSIGN", Const, 0, ""},
+ {"RANGE", Const, 0, ""},
+ {"RBRACE", Const, 0, ""},
+ {"RBRACK", Const, 0, ""},
+ {"REM", Const, 0, ""},
+ {"REM_ASSIGN", Const, 0, ""},
+ {"RETURN", Const, 0, ""},
+ {"RPAREN", Const, 0, ""},
+ {"SELECT", Const, 0, ""},
+ {"SEMICOLON", Const, 0, ""},
+ {"SHL", Const, 0, ""},
+ {"SHL_ASSIGN", Const, 0, ""},
+ {"SHR", Const, 0, ""},
+ {"SHR_ASSIGN", Const, 0, ""},
+ {"STRING", Const, 0, ""},
+ {"STRUCT", Const, 0, ""},
+ {"SUB", Const, 0, ""},
+ {"SUB_ASSIGN", Const, 0, ""},
+ {"SWITCH", Const, 0, ""},
+ {"TILDE", Const, 18, ""},
+ {"TYPE", Const, 0, ""},
+ {"Token", Type, 0, ""},
+ {"UnaryPrec", Const, 0, ""},
+ {"VAR", Const, 0, ""},
+ {"XOR", Const, 0, ""},
+ {"XOR_ASSIGN", Const, 0, ""},
+ },
+ "go/types": {
+ {"(*Alias).Obj", Method, 22, ""},
+ {"(*Alias).Origin", Method, 23, ""},
+ {"(*Alias).Rhs", Method, 23, ""},
+ {"(*Alias).SetTypeParams", Method, 23, ""},
+ {"(*Alias).String", Method, 22, ""},
+ {"(*Alias).TypeArgs", Method, 23, ""},
+ {"(*Alias).TypeParams", Method, 23, ""},
+ {"(*Alias).Underlying", Method, 22, ""},
+ {"(*ArgumentError).Error", Method, 18, ""},
+ {"(*ArgumentError).Unwrap", Method, 18, ""},
+ {"(*Array).Elem", Method, 5, ""},
+ {"(*Array).Len", Method, 5, ""},
+ {"(*Array).String", Method, 5, ""},
+ {"(*Array).Underlying", Method, 5, ""},
+ {"(*Basic).Info", Method, 5, ""},
+ {"(*Basic).Kind", Method, 5, ""},
+ {"(*Basic).Name", Method, 5, ""},
+ {"(*Basic).String", Method, 5, ""},
+ {"(*Basic).Underlying", Method, 5, ""},
+ {"(*Builtin).Exported", Method, 5, ""},
+ {"(*Builtin).Id", Method, 5, ""},
+ {"(*Builtin).Name", Method, 5, ""},
+ {"(*Builtin).Parent", Method, 5, ""},
+ {"(*Builtin).Pkg", Method, 5, ""},
+ {"(*Builtin).Pos", Method, 5, ""},
+ {"(*Builtin).String", Method, 5, ""},
+ {"(*Builtin).Type", Method, 5, ""},
+ {"(*Chan).Dir", Method, 5, ""},
+ {"(*Chan).Elem", Method, 5, ""},
+ {"(*Chan).String", Method, 5, ""},
+ {"(*Chan).Underlying", Method, 5, ""},
+ {"(*Checker).Files", Method, 5, ""},
+ {"(*Config).Check", Method, 5, ""},
+ {"(*Const).Exported", Method, 5, ""},
+ {"(*Const).Id", Method, 5, ""},
+ {"(*Const).Name", Method, 5, ""},
+ {"(*Const).Parent", Method, 5, ""},
+ {"(*Const).Pkg", Method, 5, ""},
+ {"(*Const).Pos", Method, 5, ""},
+ {"(*Const).String", Method, 5, ""},
+ {"(*Const).Type", Method, 5, ""},
+ {"(*Const).Val", Method, 5, ""},
+ {"(*Func).Exported", Method, 5, ""},
+ {"(*Func).FullName", Method, 5, ""},
+ {"(*Func).Id", Method, 5, ""},
+ {"(*Func).Name", Method, 5, ""},
+ {"(*Func).Origin", Method, 19, ""},
+ {"(*Func).Parent", Method, 5, ""},
+ {"(*Func).Pkg", Method, 5, ""},
+ {"(*Func).Pos", Method, 5, ""},
+ {"(*Func).Scope", Method, 5, ""},
+ {"(*Func).Signature", Method, 23, ""},
+ {"(*Func).String", Method, 5, ""},
+ {"(*Func).Type", Method, 5, ""},
+ {"(*Info).ObjectOf", Method, 5, ""},
+ {"(*Info).PkgNameOf", Method, 22, ""},
+ {"(*Info).TypeOf", Method, 5, ""},
+ {"(*Initializer).String", Method, 5, ""},
+ {"(*Interface).Complete", Method, 5, ""},
+ {"(*Interface).Embedded", Method, 5, ""},
+ {"(*Interface).EmbeddedType", Method, 11, ""},
+ {"(*Interface).EmbeddedTypes", Method, 24, ""},
+ {"(*Interface).Empty", Method, 5, ""},
+ {"(*Interface).ExplicitMethod", Method, 5, ""},
+ {"(*Interface).ExplicitMethods", Method, 24, ""},
+ {"(*Interface).IsComparable", Method, 18, ""},
+ {"(*Interface).IsImplicit", Method, 18, ""},
+ {"(*Interface).IsMethodSet", Method, 18, ""},
+ {"(*Interface).MarkImplicit", Method, 18, ""},
+ {"(*Interface).Method", Method, 5, ""},
+ {"(*Interface).Methods", Method, 24, ""},
+ {"(*Interface).NumEmbeddeds", Method, 5, ""},
+ {"(*Interface).NumExplicitMethods", Method, 5, ""},
+ {"(*Interface).NumMethods", Method, 5, ""},
+ {"(*Interface).String", Method, 5, ""},
+ {"(*Interface).Underlying", Method, 5, ""},
+ {"(*Label).Exported", Method, 5, ""},
+ {"(*Label).Id", Method, 5, ""},
+ {"(*Label).Name", Method, 5, ""},
+ {"(*Label).Parent", Method, 5, ""},
+ {"(*Label).Pkg", Method, 5, ""},
+ {"(*Label).Pos", Method, 5, ""},
+ {"(*Label).String", Method, 5, ""},
+ {"(*Label).Type", Method, 5, ""},
+ {"(*Map).Elem", Method, 5, ""},
+ {"(*Map).Key", Method, 5, ""},
+ {"(*Map).String", Method, 5, ""},
+ {"(*Map).Underlying", Method, 5, ""},
+ {"(*MethodSet).At", Method, 5, ""},
+ {"(*MethodSet).Len", Method, 5, ""},
+ {"(*MethodSet).Lookup", Method, 5, ""},
+ {"(*MethodSet).Methods", Method, 24, ""},
+ {"(*MethodSet).String", Method, 5, ""},
+ {"(*Named).AddMethod", Method, 5, ""},
+ {"(*Named).Method", Method, 5, ""},
+ {"(*Named).Methods", Method, 24, ""},
+ {"(*Named).NumMethods", Method, 5, ""},
+ {"(*Named).Obj", Method, 5, ""},
+ {"(*Named).Origin", Method, 18, ""},
+ {"(*Named).SetTypeParams", Method, 18, ""},
+ {"(*Named).SetUnderlying", Method, 5, ""},
+ {"(*Named).String", Method, 5, ""},
+ {"(*Named).TypeArgs", Method, 18, ""},
+ {"(*Named).TypeParams", Method, 18, ""},
+ {"(*Named).Underlying", Method, 5, ""},
+ {"(*Nil).Exported", Method, 5, ""},
+ {"(*Nil).Id", Method, 5, ""},
+ {"(*Nil).Name", Method, 5, ""},
+ {"(*Nil).Parent", Method, 5, ""},
+ {"(*Nil).Pkg", Method, 5, ""},
+ {"(*Nil).Pos", Method, 5, ""},
+ {"(*Nil).String", Method, 5, ""},
+ {"(*Nil).Type", Method, 5, ""},
+ {"(*Package).Complete", Method, 5, ""},
+ {"(*Package).GoVersion", Method, 21, ""},
+ {"(*Package).Imports", Method, 5, ""},
+ {"(*Package).MarkComplete", Method, 5, ""},
+ {"(*Package).Name", Method, 5, ""},
+ {"(*Package).Path", Method, 5, ""},
+ {"(*Package).Scope", Method, 5, ""},
+ {"(*Package).SetImports", Method, 5, ""},
+ {"(*Package).SetName", Method, 6, ""},
+ {"(*Package).String", Method, 5, ""},
+ {"(*PkgName).Exported", Method, 5, ""},
+ {"(*PkgName).Id", Method, 5, ""},
+ {"(*PkgName).Imported", Method, 5, ""},
+ {"(*PkgName).Name", Method, 5, ""},
+ {"(*PkgName).Parent", Method, 5, ""},
+ {"(*PkgName).Pkg", Method, 5, ""},
+ {"(*PkgName).Pos", Method, 5, ""},
+ {"(*PkgName).String", Method, 5, ""},
+ {"(*PkgName).Type", Method, 5, ""},
+ {"(*Pointer).Elem", Method, 5, ""},
+ {"(*Pointer).String", Method, 5, ""},
+ {"(*Pointer).Underlying", Method, 5, ""},
+ {"(*Scope).Child", Method, 5, ""},
+ {"(*Scope).Children", Method, 24, ""},
+ {"(*Scope).Contains", Method, 5, ""},
+ {"(*Scope).End", Method, 5, ""},
+ {"(*Scope).Innermost", Method, 5, ""},
+ {"(*Scope).Insert", Method, 5, ""},
+ {"(*Scope).Len", Method, 5, ""},
+ {"(*Scope).Lookup", Method, 5, ""},
+ {"(*Scope).LookupParent", Method, 5, ""},
+ {"(*Scope).Names", Method, 5, ""},
+ {"(*Scope).NumChildren", Method, 5, ""},
+ {"(*Scope).Parent", Method, 5, ""},
+ {"(*Scope).Pos", Method, 5, ""},
+ {"(*Scope).String", Method, 5, ""},
+ {"(*Scope).WriteTo", Method, 5, ""},
+ {"(*Selection).Index", Method, 5, ""},
+ {"(*Selection).Indirect", Method, 5, ""},
+ {"(*Selection).Kind", Method, 5, ""},
+ {"(*Selection).Obj", Method, 5, ""},
+ {"(*Selection).Recv", Method, 5, ""},
+ {"(*Selection).String", Method, 5, ""},
+ {"(*Selection).Type", Method, 5, ""},
+ {"(*Signature).Params", Method, 5, ""},
+ {"(*Signature).Recv", Method, 5, ""},
+ {"(*Signature).RecvTypeParams", Method, 18, ""},
+ {"(*Signature).Results", Method, 5, ""},
+ {"(*Signature).String", Method, 5, ""},
+ {"(*Signature).TypeParams", Method, 18, ""},
+ {"(*Signature).Underlying", Method, 5, ""},
+ {"(*Signature).Variadic", Method, 5, ""},
+ {"(*Slice).Elem", Method, 5, ""},
+ {"(*Slice).String", Method, 5, ""},
+ {"(*Slice).Underlying", Method, 5, ""},
+ {"(*StdSizes).Alignof", Method, 5, ""},
+ {"(*StdSizes).Offsetsof", Method, 5, ""},
+ {"(*StdSizes).Sizeof", Method, 5, ""},
+ {"(*Struct).Field", Method, 5, ""},
+ {"(*Struct).Fields", Method, 24, ""},
+ {"(*Struct).NumFields", Method, 5, ""},
+ {"(*Struct).String", Method, 5, ""},
+ {"(*Struct).Tag", Method, 5, ""},
+ {"(*Struct).Underlying", Method, 5, ""},
+ {"(*Term).String", Method, 18, ""},
+ {"(*Term).Tilde", Method, 18, ""},
+ {"(*Term).Type", Method, 18, ""},
+ {"(*Tuple).At", Method, 5, ""},
+ {"(*Tuple).Len", Method, 5, ""},
+ {"(*Tuple).String", Method, 5, ""},
+ {"(*Tuple).Underlying", Method, 5, ""},
+ {"(*Tuple).Variables", Method, 24, ""},
+ {"(*TypeList).At", Method, 18, ""},
+ {"(*TypeList).Len", Method, 18, ""},
+ {"(*TypeList).Types", Method, 24, ""},
+ {"(*TypeName).Exported", Method, 5, ""},
+ {"(*TypeName).Id", Method, 5, ""},
+ {"(*TypeName).IsAlias", Method, 9, ""},
+ {"(*TypeName).Name", Method, 5, ""},
+ {"(*TypeName).Parent", Method, 5, ""},
+ {"(*TypeName).Pkg", Method, 5, ""},
+ {"(*TypeName).Pos", Method, 5, ""},
+ {"(*TypeName).String", Method, 5, ""},
+ {"(*TypeName).Type", Method, 5, ""},
+ {"(*TypeParam).Constraint", Method, 18, ""},
+ {"(*TypeParam).Index", Method, 18, ""},
+ {"(*TypeParam).Obj", Method, 18, ""},
+ {"(*TypeParam).SetConstraint", Method, 18, ""},
+ {"(*TypeParam).String", Method, 18, ""},
+ {"(*TypeParam).Underlying", Method, 18, ""},
+ {"(*TypeParamList).At", Method, 18, ""},
+ {"(*TypeParamList).Len", Method, 18, ""},
+ {"(*TypeParamList).TypeParams", Method, 24, ""},
+ {"(*Union).Len", Method, 18, ""},
+ {"(*Union).String", Method, 18, ""},
+ {"(*Union).Term", Method, 18, ""},
+ {"(*Union).Terms", Method, 24, ""},
+ {"(*Union).Underlying", Method, 18, ""},
+ {"(*Var).Anonymous", Method, 5, ""},
+ {"(*Var).Embedded", Method, 11, ""},
+ {"(*Var).Exported", Method, 5, ""},
+ {"(*Var).Id", Method, 5, ""},
+ {"(*Var).IsField", Method, 5, ""},
+ {"(*Var).Kind", Method, 25, ""},
+ {"(*Var).Name", Method, 5, ""},
+ {"(*Var).Origin", Method, 19, ""},
+ {"(*Var).Parent", Method, 5, ""},
+ {"(*Var).Pkg", Method, 5, ""},
+ {"(*Var).Pos", Method, 5, ""},
+ {"(*Var).SetKind", Method, 25, ""},
+ {"(*Var).String", Method, 5, ""},
+ {"(*Var).Type", Method, 5, ""},
+ {"(Checker).ObjectOf", Method, 5, ""},
+ {"(Checker).PkgNameOf", Method, 22, ""},
+ {"(Checker).TypeOf", Method, 5, ""},
+ {"(Error).Error", Method, 5, ""},
+ {"(TypeAndValue).Addressable", Method, 5, ""},
+ {"(TypeAndValue).Assignable", Method, 5, ""},
+ {"(TypeAndValue).HasOk", Method, 5, ""},
+ {"(TypeAndValue).IsBuiltin", Method, 5, ""},
+ {"(TypeAndValue).IsNil", Method, 5, ""},
+ {"(TypeAndValue).IsType", Method, 5, ""},
+ {"(TypeAndValue).IsValue", Method, 5, ""},
+ {"(TypeAndValue).IsVoid", Method, 5, ""},
+ {"(VarKind).String", Method, 25, ""},
+ {"Alias", Type, 22, ""},
+ {"ArgumentError", Type, 18, ""},
+ {"ArgumentError.Err", Field, 18, ""},
+ {"ArgumentError.Index", Field, 18, ""},
+ {"Array", Type, 5, ""},
+ {"AssertableTo", Func, 5, "func(V *Interface, T Type) bool"},
+ {"AssignableTo", Func, 5, "func(V Type, T Type) bool"},
+ {"Basic", Type, 5, ""},
+ {"BasicInfo", Type, 5, ""},
+ {"BasicKind", Type, 5, ""},
+ {"Bool", Const, 5, ""},
+ {"Builtin", Type, 5, ""},
+ {"Byte", Const, 5, ""},
+ {"Chan", Type, 5, ""},
+ {"ChanDir", Type, 5, ""},
+ {"CheckExpr", Func, 13, "func(fset *token.FileSet, pkg *Package, pos token.Pos, expr ast.Expr, info *Info) (err error)"},
+ {"Checker", Type, 5, ""},
+ {"Checker.Info", Field, 5, ""},
+ {"Comparable", Func, 5, "func(T Type) bool"},
+ {"Complex128", Const, 5, ""},
+ {"Complex64", Const, 5, ""},
+ {"Config", Type, 5, ""},
+ {"Config.Context", Field, 18, ""},
+ {"Config.DisableUnusedImportCheck", Field, 5, ""},
+ {"Config.Error", Field, 5, ""},
+ {"Config.FakeImportC", Field, 5, ""},
+ {"Config.GoVersion", Field, 18, ""},
+ {"Config.IgnoreFuncBodies", Field, 5, ""},
+ {"Config.Importer", Field, 5, ""},
+ {"Config.Sizes", Field, 5, ""},
+ {"Const", Type, 5, ""},
+ {"Context", Type, 18, ""},
+ {"ConvertibleTo", Func, 5, "func(V Type, T Type) bool"},
+ {"DefPredeclaredTestFuncs", Func, 5, "func()"},
+ {"Default", Func, 8, "func(t Type) Type"},
+ {"Error", Type, 5, ""},
+ {"Error.Fset", Field, 5, ""},
+ {"Error.Msg", Field, 5, ""},
+ {"Error.Pos", Field, 5, ""},
+ {"Error.Soft", Field, 5, ""},
+ {"Eval", Func, 5, "func(fset *token.FileSet, pkg *Package, pos token.Pos, expr string) (_ TypeAndValue, err error)"},
+ {"ExprString", Func, 5, "func(x ast.Expr) string"},
+ {"FieldVal", Const, 5, ""},
+ {"FieldVar", Const, 25, ""},
+ {"Float32", Const, 5, ""},
+ {"Float64", Const, 5, ""},
+ {"Func", Type, 5, ""},
+ {"Id", Func, 5, "func(pkg *Package, name string) string"},
+ {"Identical", Func, 5, "func(x Type, y Type) bool"},
+ {"IdenticalIgnoreTags", Func, 8, "func(x Type, y Type) bool"},
+ {"Implements", Func, 5, "func(V Type, T *Interface) bool"},
+ {"ImportMode", Type, 6, ""},
+ {"Importer", Type, 5, ""},
+ {"ImporterFrom", Type, 6, ""},
+ {"Info", Type, 5, ""},
+ {"Info.Defs", Field, 5, ""},
+ {"Info.FileVersions", Field, 22, ""},
+ {"Info.Implicits", Field, 5, ""},
+ {"Info.InitOrder", Field, 5, ""},
+ {"Info.Instances", Field, 18, ""},
+ {"Info.Scopes", Field, 5, ""},
+ {"Info.Selections", Field, 5, ""},
+ {"Info.Types", Field, 5, ""},
+ {"Info.Uses", Field, 5, ""},
+ {"Initializer", Type, 5, ""},
+ {"Initializer.Lhs", Field, 5, ""},
+ {"Initializer.Rhs", Field, 5, ""},
+ {"Instance", Type, 18, ""},
+ {"Instance.Type", Field, 18, ""},
+ {"Instance.TypeArgs", Field, 18, ""},
+ {"Instantiate", Func, 18, "func(ctxt *Context, orig Type, targs []Type, validate bool) (Type, error)"},
+ {"Int", Const, 5, ""},
+ {"Int16", Const, 5, ""},
+ {"Int32", Const, 5, ""},
+ {"Int64", Const, 5, ""},
+ {"Int8", Const, 5, ""},
+ {"Interface", Type, 5, ""},
+ {"Invalid", Const, 5, ""},
+ {"IsBoolean", Const, 5, ""},
+ {"IsComplex", Const, 5, ""},
+ {"IsConstType", Const, 5, ""},
+ {"IsFloat", Const, 5, ""},
+ {"IsInteger", Const, 5, ""},
+ {"IsInterface", Func, 5, "func(t Type) bool"},
+ {"IsNumeric", Const, 5, ""},
+ {"IsOrdered", Const, 5, ""},
+ {"IsString", Const, 5, ""},
+ {"IsUnsigned", Const, 5, ""},
+ {"IsUntyped", Const, 5, ""},
+ {"Label", Type, 5, ""},
+ {"LocalVar", Const, 25, ""},
+ {"LookupFieldOrMethod", Func, 5, "func(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool)"},
+ {"LookupSelection", Func, 25, "func(T Type, addressable bool, pkg *Package, name string) (Selection, bool)"},
+ {"Map", Type, 5, ""},
+ {"MethodExpr", Const, 5, ""},
+ {"MethodSet", Type, 5, ""},
+ {"MethodVal", Const, 5, ""},
+ {"MissingMethod", Func, 5, "func(V Type, T *Interface, static bool) (method *Func, wrongType bool)"},
+ {"Named", Type, 5, ""},
+ {"NewAlias", Func, 22, "func(obj *TypeName, rhs Type) *Alias"},
+ {"NewArray", Func, 5, "func(elem Type, len int64) *Array"},
+ {"NewChan", Func, 5, "func(dir ChanDir, elem Type) *Chan"},
+ {"NewChecker", Func, 5, "func(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Checker"},
+ {"NewConst", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type, val constant.Value) *Const"},
+ {"NewContext", Func, 18, "func() *Context"},
+ {"NewField", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type, embedded bool) *Var"},
+ {"NewFunc", Func, 5, "func(pos token.Pos, pkg *Package, name string, sig *Signature) *Func"},
+ {"NewInterface", Func, 5, "func(methods []*Func, embeddeds []*Named) *Interface"},
+ {"NewInterfaceType", Func, 11, "func(methods []*Func, embeddeds []Type) *Interface"},
+ {"NewLabel", Func, 5, "func(pos token.Pos, pkg *Package, name string) *Label"},
+ {"NewMap", Func, 5, "func(key Type, elem Type) *Map"},
+ {"NewMethodSet", Func, 5, "func(T Type) *MethodSet"},
+ {"NewNamed", Func, 5, "func(obj *TypeName, underlying Type, methods []*Func) *Named"},
+ {"NewPackage", Func, 5, "func(path string, name string) *Package"},
+ {"NewParam", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"},
+ {"NewPkgName", Func, 5, "func(pos token.Pos, pkg *Package, name string, imported *Package) *PkgName"},
+ {"NewPointer", Func, 5, "func(elem Type) *Pointer"},
+ {"NewScope", Func, 5, "func(parent *Scope, pos token.Pos, end token.Pos, comment string) *Scope"},
+ {"NewSignature", Func, 5, "func(recv *Var, params *Tuple, results *Tuple, variadic bool) *Signature"},
+ {"NewSignatureType", Func, 18, "func(recv *Var, recvTypeParams []*TypeParam, typeParams []*TypeParam, params *Tuple, results *Tuple, variadic bool) *Signature"},
+ {"NewSlice", Func, 5, "func(elem Type) *Slice"},
+ {"NewStruct", Func, 5, "func(fields []*Var, tags []string) *Struct"},
+ {"NewTerm", Func, 18, "func(tilde bool, typ Type) *Term"},
+ {"NewTuple", Func, 5, "func(x ...*Var) *Tuple"},
+ {"NewTypeName", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *TypeName"},
+ {"NewTypeParam", Func, 18, "func(obj *TypeName, constraint Type) *TypeParam"},
+ {"NewUnion", Func, 18, "func(terms []*Term) *Union"},
+ {"NewVar", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"},
+ {"Nil", Type, 5, ""},
+ {"Object", Type, 5, ""},
+ {"ObjectString", Func, 5, "func(obj Object, qf Qualifier) string"},
+ {"Package", Type, 5, ""},
+ {"PackageVar", Const, 25, ""},
+ {"ParamVar", Const, 25, ""},
+ {"PkgName", Type, 5, ""},
+ {"Pointer", Type, 5, ""},
+ {"Qualifier", Type, 5, ""},
+ {"RecvOnly", Const, 5, ""},
+ {"RecvVar", Const, 25, ""},
+ {"RelativeTo", Func, 5, "func(pkg *Package) Qualifier"},
+ {"ResultVar", Const, 25, ""},
+ {"Rune", Const, 5, ""},
+ {"Satisfies", Func, 20, "func(V Type, T *Interface) bool"},
+ {"Scope", Type, 5, ""},
+ {"Selection", Type, 5, ""},
+ {"SelectionKind", Type, 5, ""},
+ {"SelectionString", Func, 5, "func(s *Selection, qf Qualifier) string"},
+ {"SendOnly", Const, 5, ""},
+ {"SendRecv", Const, 5, ""},
+ {"Signature", Type, 5, ""},
+ {"Sizes", Type, 5, ""},
+ {"SizesFor", Func, 9, "func(compiler string, arch string) Sizes"},
+ {"Slice", Type, 5, ""},
+ {"StdSizes", Type, 5, ""},
+ {"StdSizes.MaxAlign", Field, 5, ""},
+ {"StdSizes.WordSize", Field, 5, ""},
+ {"String", Const, 5, ""},
+ {"Struct", Type, 5, ""},
+ {"Term", Type, 18, ""},
+ {"Tuple", Type, 5, ""},
+ {"Typ", Var, 5, ""},
+ {"Type", Type, 5, ""},
+ {"TypeAndValue", Type, 5, ""},
+ {"TypeAndValue.Type", Field, 5, ""},
+ {"TypeAndValue.Value", Field, 5, ""},
+ {"TypeList", Type, 18, ""},
+ {"TypeName", Type, 5, ""},
+ {"TypeParam", Type, 18, ""},
+ {"TypeParamList", Type, 18, ""},
+ {"TypeString", Func, 5, "func(typ Type, qf Qualifier) string"},
+ {"Uint", Const, 5, ""},
+ {"Uint16", Const, 5, ""},
+ {"Uint32", Const, 5, ""},
+ {"Uint64", Const, 5, ""},
+ {"Uint8", Const, 5, ""},
+ {"Uintptr", Const, 5, ""},
+ {"Unalias", Func, 22, "func(t Type) Type"},
+ {"Union", Type, 18, ""},
+ {"Universe", Var, 5, ""},
+ {"Unsafe", Var, 5, ""},
+ {"UnsafePointer", Const, 5, ""},
+ {"UntypedBool", Const, 5, ""},
+ {"UntypedComplex", Const, 5, ""},
+ {"UntypedFloat", Const, 5, ""},
+ {"UntypedInt", Const, 5, ""},
+ {"UntypedNil", Const, 5, ""},
+ {"UntypedRune", Const, 5, ""},
+ {"UntypedString", Const, 5, ""},
+ {"Var", Type, 5, ""},
+ {"VarKind", Type, 25, ""},
+ {"WriteExpr", Func, 5, "func(buf *bytes.Buffer, x ast.Expr)"},
+ {"WriteSignature", Func, 5, "func(buf *bytes.Buffer, sig *Signature, qf Qualifier)"},
+ {"WriteType", Func, 5, "func(buf *bytes.Buffer, typ Type, qf Qualifier)"},
+ },
+ "go/version": {
+ {"Compare", Func, 22, "func(x string, y string) int"},
+ {"IsValid", Func, 22, "func(x string) bool"},
+ {"Lang", Func, 22, "func(x string) string"},
+ },
+ "hash": {
+ {"Cloner", Type, 25, ""},
+ {"Hash", Type, 0, ""},
+ {"Hash32", Type, 0, ""},
+ {"Hash64", Type, 0, ""},
+ {"XOF", Type, 25, ""},
+ },
+ "hash/adler32": {
+ {"Checksum", Func, 0, "func(data []byte) uint32"},
+ {"New", Func, 0, "func() hash.Hash32"},
+ {"Size", Const, 0, ""},
+ },
+ "hash/crc32": {
+ {"Castagnoli", Const, 0, ""},
+ {"Checksum", Func, 0, "func(data []byte, tab *Table) uint32"},
+ {"ChecksumIEEE", Func, 0, "func(data []byte) uint32"},
+ {"IEEE", Const, 0, ""},
+ {"IEEETable", Var, 0, ""},
+ {"Koopman", Const, 0, ""},
+ {"MakeTable", Func, 0, "func(poly uint32) *Table"},
+ {"New", Func, 0, "func(tab *Table) hash.Hash32"},
+ {"NewIEEE", Func, 0, "func() hash.Hash32"},
+ {"Size", Const, 0, ""},
+ {"Table", Type, 0, ""},
+ {"Update", Func, 0, "func(crc uint32, tab *Table, p []byte) uint32"},
+ },
+ "hash/crc64": {
+ {"Checksum", Func, 0, "func(data []byte, tab *Table) uint64"},
+ {"ECMA", Const, 0, ""},
+ {"ISO", Const, 0, ""},
+ {"MakeTable", Func, 0, "func(poly uint64) *Table"},
+ {"New", Func, 0, "func(tab *Table) hash.Hash64"},
+ {"Size", Const, 0, ""},
+ {"Table", Type, 0, ""},
+ {"Update", Func, 0, "func(crc uint64, tab *Table, p []byte) uint64"},
+ },
+ "hash/fnv": {
+ {"New128", Func, 9, "func() hash.Hash"},
+ {"New128a", Func, 9, "func() hash.Hash"},
+ {"New32", Func, 0, "func() hash.Hash32"},
+ {"New32a", Func, 0, "func() hash.Hash32"},
+ {"New64", Func, 0, "func() hash.Hash64"},
+ {"New64a", Func, 0, "func() hash.Hash64"},
+ },
+ "hash/maphash": {
+ {"(*Hash).BlockSize", Method, 14, ""},
+ {"(*Hash).Clone", Method, 25, ""},
+ {"(*Hash).Reset", Method, 14, ""},
+ {"(*Hash).Seed", Method, 14, ""},
+ {"(*Hash).SetSeed", Method, 14, ""},
+ {"(*Hash).Size", Method, 14, ""},
+ {"(*Hash).Sum", Method, 14, ""},
+ {"(*Hash).Sum64", Method, 14, ""},
+ {"(*Hash).Write", Method, 14, ""},
+ {"(*Hash).WriteByte", Method, 14, ""},
+ {"(*Hash).WriteString", Method, 14, ""},
+ {"Bytes", Func, 19, "func(seed Seed, b []byte) uint64"},
+ {"Comparable", Func, 24, "func[T comparable](seed Seed, v T) uint64"},
+ {"Hash", Type, 14, ""},
+ {"MakeSeed", Func, 14, "func() Seed"},
+ {"Seed", Type, 14, ""},
+ {"String", Func, 19, "func(seed Seed, s string) uint64"},
+ {"WriteComparable", Func, 24, "func[T comparable](h *Hash, x T)"},
+ },
+ "html": {
+ {"EscapeString", Func, 0, "func(s string) string"},
+ {"UnescapeString", Func, 0, "func(s string) string"},
+ },
+ "html/template": {
+ {"(*Error).Error", Method, 0, ""},
+ {"(*Template).AddParseTree", Method, 0, ""},
+ {"(*Template).Clone", Method, 0, ""},
+ {"(*Template).DefinedTemplates", Method, 6, ""},
+ {"(*Template).Delims", Method, 0, ""},
+ {"(*Template).Execute", Method, 0, ""},
+ {"(*Template).ExecuteTemplate", Method, 0, ""},
+ {"(*Template).Funcs", Method, 0, ""},
+ {"(*Template).Lookup", Method, 0, ""},
+ {"(*Template).Name", Method, 0, ""},
+ {"(*Template).New", Method, 0, ""},
+ {"(*Template).Option", Method, 5, ""},
+ {"(*Template).Parse", Method, 0, ""},
+ {"(*Template).ParseFS", Method, 16, ""},
+ {"(*Template).ParseFiles", Method, 0, ""},
+ {"(*Template).ParseGlob", Method, 0, ""},
+ {"(*Template).Templates", Method, 0, ""},
+ {"CSS", Type, 0, ""},
+ {"ErrAmbigContext", Const, 0, ""},
+ {"ErrBadHTML", Const, 0, ""},
+ {"ErrBranchEnd", Const, 0, ""},
+ {"ErrEndContext", Const, 0, ""},
+ {"ErrJSTemplate", Const, 21, ""},
+ {"ErrNoSuchTemplate", Const, 0, ""},
+ {"ErrOutputContext", Const, 0, ""},
+ {"ErrPartialCharset", Const, 0, ""},
+ {"ErrPartialEscape", Const, 0, ""},
+ {"ErrPredefinedEscaper", Const, 9, ""},
+ {"ErrRangeLoopReentry", Const, 0, ""},
+ {"ErrSlashAmbig", Const, 0, ""},
+ {"Error", Type, 0, ""},
+ {"Error.Description", Field, 0, ""},
+ {"Error.ErrorCode", Field, 0, ""},
+ {"Error.Line", Field, 0, ""},
+ {"Error.Name", Field, 0, ""},
+ {"Error.Node", Field, 4, ""},
+ {"ErrorCode", Type, 0, ""},
+ {"FuncMap", Type, 0, ""},
+ {"HTML", Type, 0, ""},
+ {"HTMLAttr", Type, 0, ""},
+ {"HTMLEscape", Func, 0, "func(w io.Writer, b []byte)"},
+ {"HTMLEscapeString", Func, 0, "func(s string) string"},
+ {"HTMLEscaper", Func, 0, "func(args ...any) string"},
+ {"IsTrue", Func, 6, "func(val any) (truth bool, ok bool)"},
+ {"JS", Type, 0, ""},
+ {"JSEscape", Func, 0, "func(w io.Writer, b []byte)"},
+ {"JSEscapeString", Func, 0, "func(s string) string"},
+ {"JSEscaper", Func, 0, "func(args ...any) string"},
+ {"JSStr", Type, 0, ""},
+ {"Must", Func, 0, "func(t *Template, err error) *Template"},
+ {"New", Func, 0, "func(name string) *Template"},
+ {"OK", Const, 0, ""},
+ {"ParseFS", Func, 16, "func(fs fs.FS, patterns ...string) (*Template, error)"},
+ {"ParseFiles", Func, 0, "func(filenames ...string) (*Template, error)"},
+ {"ParseGlob", Func, 0, "func(pattern string) (*Template, error)"},
+ {"Srcset", Type, 10, ""},
+ {"Template", Type, 0, ""},
+ {"Template.Tree", Field, 2, ""},
+ {"URL", Type, 0, ""},
+ {"URLQueryEscaper", Func, 0, "func(args ...any) string"},
+ },
+ "image": {
+ {"(*Alpha).AlphaAt", Method, 4, ""},
+ {"(*Alpha).At", Method, 0, ""},
+ {"(*Alpha).Bounds", Method, 0, ""},
+ {"(*Alpha).ColorModel", Method, 0, ""},
+ {"(*Alpha).Opaque", Method, 0, ""},
+ {"(*Alpha).PixOffset", Method, 0, ""},
+ {"(*Alpha).RGBA64At", Method, 17, ""},
+ {"(*Alpha).Set", Method, 0, ""},
+ {"(*Alpha).SetAlpha", Method, 0, ""},
+ {"(*Alpha).SetRGBA64", Method, 17, ""},
+ {"(*Alpha).SubImage", Method, 0, ""},
+ {"(*Alpha16).Alpha16At", Method, 4, ""},
+ {"(*Alpha16).At", Method, 0, ""},
+ {"(*Alpha16).Bounds", Method, 0, ""},
+ {"(*Alpha16).ColorModel", Method, 0, ""},
+ {"(*Alpha16).Opaque", Method, 0, ""},
+ {"(*Alpha16).PixOffset", Method, 0, ""},
+ {"(*Alpha16).RGBA64At", Method, 17, ""},
+ {"(*Alpha16).Set", Method, 0, ""},
+ {"(*Alpha16).SetAlpha16", Method, 0, ""},
+ {"(*Alpha16).SetRGBA64", Method, 17, ""},
+ {"(*Alpha16).SubImage", Method, 0, ""},
+ {"(*CMYK).At", Method, 5, ""},
+ {"(*CMYK).Bounds", Method, 5, ""},
+ {"(*CMYK).CMYKAt", Method, 5, ""},
+ {"(*CMYK).ColorModel", Method, 5, ""},
+ {"(*CMYK).Opaque", Method, 5, ""},
+ {"(*CMYK).PixOffset", Method, 5, ""},
+ {"(*CMYK).RGBA64At", Method, 17, ""},
+ {"(*CMYK).Set", Method, 5, ""},
+ {"(*CMYK).SetCMYK", Method, 5, ""},
+ {"(*CMYK).SetRGBA64", Method, 17, ""},
+ {"(*CMYK).SubImage", Method, 5, ""},
+ {"(*Gray).At", Method, 0, ""},
+ {"(*Gray).Bounds", Method, 0, ""},
+ {"(*Gray).ColorModel", Method, 0, ""},
+ {"(*Gray).GrayAt", Method, 4, ""},
+ {"(*Gray).Opaque", Method, 0, ""},
+ {"(*Gray).PixOffset", Method, 0, ""},
+ {"(*Gray).RGBA64At", Method, 17, ""},
+ {"(*Gray).Set", Method, 0, ""},
+ {"(*Gray).SetGray", Method, 0, ""},
+ {"(*Gray).SetRGBA64", Method, 17, ""},
+ {"(*Gray).SubImage", Method, 0, ""},
+ {"(*Gray16).At", Method, 0, ""},
+ {"(*Gray16).Bounds", Method, 0, ""},
+ {"(*Gray16).ColorModel", Method, 0, ""},
+ {"(*Gray16).Gray16At", Method, 4, ""},
+ {"(*Gray16).Opaque", Method, 0, ""},
+ {"(*Gray16).PixOffset", Method, 0, ""},
+ {"(*Gray16).RGBA64At", Method, 17, ""},
+ {"(*Gray16).Set", Method, 0, ""},
+ {"(*Gray16).SetGray16", Method, 0, ""},
+ {"(*Gray16).SetRGBA64", Method, 17, ""},
+ {"(*Gray16).SubImage", Method, 0, ""},
+ {"(*NRGBA).At", Method, 0, ""},
+ {"(*NRGBA).Bounds", Method, 0, ""},
+ {"(*NRGBA).ColorModel", Method, 0, ""},
+ {"(*NRGBA).NRGBAAt", Method, 4, ""},
+ {"(*NRGBA).Opaque", Method, 0, ""},
+ {"(*NRGBA).PixOffset", Method, 0, ""},
+ {"(*NRGBA).RGBA64At", Method, 17, ""},
+ {"(*NRGBA).Set", Method, 0, ""},
+ {"(*NRGBA).SetNRGBA", Method, 0, ""},
+ {"(*NRGBA).SetRGBA64", Method, 17, ""},
+ {"(*NRGBA).SubImage", Method, 0, ""},
+ {"(*NRGBA64).At", Method, 0, ""},
+ {"(*NRGBA64).Bounds", Method, 0, ""},
+ {"(*NRGBA64).ColorModel", Method, 0, ""},
+ {"(*NRGBA64).NRGBA64At", Method, 4, ""},
+ {"(*NRGBA64).Opaque", Method, 0, ""},
+ {"(*NRGBA64).PixOffset", Method, 0, ""},
+ {"(*NRGBA64).RGBA64At", Method, 17, ""},
+ {"(*NRGBA64).Set", Method, 0, ""},
+ {"(*NRGBA64).SetNRGBA64", Method, 0, ""},
+ {"(*NRGBA64).SetRGBA64", Method, 17, ""},
+ {"(*NRGBA64).SubImage", Method, 0, ""},
+ {"(*NYCbCrA).AOffset", Method, 6, ""},
+ {"(*NYCbCrA).At", Method, 6, ""},
+ {"(*NYCbCrA).Bounds", Method, 6, ""},
+ {"(*NYCbCrA).COffset", Method, 6, ""},
+ {"(*NYCbCrA).ColorModel", Method, 6, ""},
+ {"(*NYCbCrA).NYCbCrAAt", Method, 6, ""},
+ {"(*NYCbCrA).Opaque", Method, 6, ""},
+ {"(*NYCbCrA).RGBA64At", Method, 17, ""},
+ {"(*NYCbCrA).SubImage", Method, 6, ""},
+ {"(*NYCbCrA).YCbCrAt", Method, 6, ""},
+ {"(*NYCbCrA).YOffset", Method, 6, ""},
+ {"(*Paletted).At", Method, 0, ""},
+ {"(*Paletted).Bounds", Method, 0, ""},
+ {"(*Paletted).ColorIndexAt", Method, 0, ""},
+ {"(*Paletted).ColorModel", Method, 0, ""},
+ {"(*Paletted).Opaque", Method, 0, ""},
+ {"(*Paletted).PixOffset", Method, 0, ""},
+ {"(*Paletted).RGBA64At", Method, 17, ""},
+ {"(*Paletted).Set", Method, 0, ""},
+ {"(*Paletted).SetColorIndex", Method, 0, ""},
+ {"(*Paletted).SetRGBA64", Method, 17, ""},
+ {"(*Paletted).SubImage", Method, 0, ""},
+ {"(*RGBA).At", Method, 0, ""},
+ {"(*RGBA).Bounds", Method, 0, ""},
+ {"(*RGBA).ColorModel", Method, 0, ""},
+ {"(*RGBA).Opaque", Method, 0, ""},
+ {"(*RGBA).PixOffset", Method, 0, ""},
+ {"(*RGBA).RGBA64At", Method, 17, ""},
+ {"(*RGBA).RGBAAt", Method, 4, ""},
+ {"(*RGBA).Set", Method, 0, ""},
+ {"(*RGBA).SetRGBA", Method, 0, ""},
+ {"(*RGBA).SetRGBA64", Method, 17, ""},
+ {"(*RGBA).SubImage", Method, 0, ""},
+ {"(*RGBA64).At", Method, 0, ""},
+ {"(*RGBA64).Bounds", Method, 0, ""},
+ {"(*RGBA64).ColorModel", Method, 0, ""},
+ {"(*RGBA64).Opaque", Method, 0, ""},
+ {"(*RGBA64).PixOffset", Method, 0, ""},
+ {"(*RGBA64).RGBA64At", Method, 4, ""},
+ {"(*RGBA64).Set", Method, 0, ""},
+ {"(*RGBA64).SetRGBA64", Method, 0, ""},
+ {"(*RGBA64).SubImage", Method, 0, ""},
+ {"(*Uniform).At", Method, 0, ""},
+ {"(*Uniform).Bounds", Method, 0, ""},
+ {"(*Uniform).ColorModel", Method, 0, ""},
+ {"(*Uniform).Convert", Method, 0, ""},
+ {"(*Uniform).Opaque", Method, 0, ""},
+ {"(*Uniform).RGBA", Method, 0, ""},
+ {"(*Uniform).RGBA64At", Method, 17, ""},
+ {"(*YCbCr).At", Method, 0, ""},
+ {"(*YCbCr).Bounds", Method, 0, ""},
+ {"(*YCbCr).COffset", Method, 0, ""},
+ {"(*YCbCr).ColorModel", Method, 0, ""},
+ {"(*YCbCr).Opaque", Method, 0, ""},
+ {"(*YCbCr).RGBA64At", Method, 17, ""},
+ {"(*YCbCr).SubImage", Method, 0, ""},
+ {"(*YCbCr).YCbCrAt", Method, 4, ""},
+ {"(*YCbCr).YOffset", Method, 0, ""},
+ {"(Point).Add", Method, 0, ""},
+ {"(Point).Div", Method, 0, ""},
+ {"(Point).Eq", Method, 0, ""},
+ {"(Point).In", Method, 0, ""},
+ {"(Point).Mod", Method, 0, ""},
+ {"(Point).Mul", Method, 0, ""},
+ {"(Point).String", Method, 0, ""},
+ {"(Point).Sub", Method, 0, ""},
+ {"(Rectangle).Add", Method, 0, ""},
+ {"(Rectangle).At", Method, 5, ""},
+ {"(Rectangle).Bounds", Method, 5, ""},
+ {"(Rectangle).Canon", Method, 0, ""},
+ {"(Rectangle).ColorModel", Method, 5, ""},
+ {"(Rectangle).Dx", Method, 0, ""},
+ {"(Rectangle).Dy", Method, 0, ""},
+ {"(Rectangle).Empty", Method, 0, ""},
+ {"(Rectangle).Eq", Method, 0, ""},
+ {"(Rectangle).In", Method, 0, ""},
+ {"(Rectangle).Inset", Method, 0, ""},
+ {"(Rectangle).Intersect", Method, 0, ""},
+ {"(Rectangle).Overlaps", Method, 0, ""},
+ {"(Rectangle).RGBA64At", Method, 17, ""},
+ {"(Rectangle).Size", Method, 0, ""},
+ {"(Rectangle).String", Method, 0, ""},
+ {"(Rectangle).Sub", Method, 0, ""},
+ {"(Rectangle).Union", Method, 0, ""},
+ {"(YCbCrSubsampleRatio).String", Method, 0, ""},
+ {"Alpha", Type, 0, ""},
+ {"Alpha.Pix", Field, 0, ""},
+ {"Alpha.Rect", Field, 0, ""},
+ {"Alpha.Stride", Field, 0, ""},
+ {"Alpha16", Type, 0, ""},
+ {"Alpha16.Pix", Field, 0, ""},
+ {"Alpha16.Rect", Field, 0, ""},
+ {"Alpha16.Stride", Field, 0, ""},
+ {"Black", Var, 0, ""},
+ {"CMYK", Type, 5, ""},
+ {"CMYK.Pix", Field, 5, ""},
+ {"CMYK.Rect", Field, 5, ""},
+ {"CMYK.Stride", Field, 5, ""},
+ {"Config", Type, 0, ""},
+ {"Config.ColorModel", Field, 0, ""},
+ {"Config.Height", Field, 0, ""},
+ {"Config.Width", Field, 0, ""},
+ {"Decode", Func, 0, "func(r io.Reader) (Image, string, error)"},
+ {"DecodeConfig", Func, 0, "func(r io.Reader) (Config, string, error)"},
+ {"ErrFormat", Var, 0, ""},
+ {"Gray", Type, 0, ""},
+ {"Gray.Pix", Field, 0, ""},
+ {"Gray.Rect", Field, 0, ""},
+ {"Gray.Stride", Field, 0, ""},
+ {"Gray16", Type, 0, ""},
+ {"Gray16.Pix", Field, 0, ""},
+ {"Gray16.Rect", Field, 0, ""},
+ {"Gray16.Stride", Field, 0, ""},
+ {"Image", Type, 0, ""},
+ {"NRGBA", Type, 0, ""},
+ {"NRGBA.Pix", Field, 0, ""},
+ {"NRGBA.Rect", Field, 0, ""},
+ {"NRGBA.Stride", Field, 0, ""},
+ {"NRGBA64", Type, 0, ""},
+ {"NRGBA64.Pix", Field, 0, ""},
+ {"NRGBA64.Rect", Field, 0, ""},
+ {"NRGBA64.Stride", Field, 0, ""},
+ {"NYCbCrA", Type, 6, ""},
+ {"NYCbCrA.A", Field, 6, ""},
+ {"NYCbCrA.AStride", Field, 6, ""},
+ {"NYCbCrA.YCbCr", Field, 6, ""},
+ {"NewAlpha", Func, 0, "func(r Rectangle) *Alpha"},
+ {"NewAlpha16", Func, 0, "func(r Rectangle) *Alpha16"},
+ {"NewCMYK", Func, 5, "func(r Rectangle) *CMYK"},
+ {"NewGray", Func, 0, "func(r Rectangle) *Gray"},
+ {"NewGray16", Func, 0, "func(r Rectangle) *Gray16"},
+ {"NewNRGBA", Func, 0, "func(r Rectangle) *NRGBA"},
+ {"NewNRGBA64", Func, 0, "func(r Rectangle) *NRGBA64"},
+ {"NewNYCbCrA", Func, 6, "func(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *NYCbCrA"},
+ {"NewPaletted", Func, 0, "func(r Rectangle, p color.Palette) *Paletted"},
+ {"NewRGBA", Func, 0, "func(r Rectangle) *RGBA"},
+ {"NewRGBA64", Func, 0, "func(r Rectangle) *RGBA64"},
+ {"NewUniform", Func, 0, "func(c color.Color) *Uniform"},
+ {"NewYCbCr", Func, 0, "func(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *YCbCr"},
+ {"Opaque", Var, 0, ""},
+ {"Paletted", Type, 0, ""},
+ {"Paletted.Palette", Field, 0, ""},
+ {"Paletted.Pix", Field, 0, ""},
+ {"Paletted.Rect", Field, 0, ""},
+ {"Paletted.Stride", Field, 0, ""},
+ {"PalettedImage", Type, 0, ""},
+ {"Point", Type, 0, ""},
+ {"Point.X", Field, 0, ""},
+ {"Point.Y", Field, 0, ""},
+ {"Pt", Func, 0, "func(X int, Y int) Point"},
+ {"RGBA", Type, 0, ""},
+ {"RGBA.Pix", Field, 0, ""},
+ {"RGBA.Rect", Field, 0, ""},
+ {"RGBA.Stride", Field, 0, ""},
+ {"RGBA64", Type, 0, ""},
+ {"RGBA64.Pix", Field, 0, ""},
+ {"RGBA64.Rect", Field, 0, ""},
+ {"RGBA64.Stride", Field, 0, ""},
+ {"RGBA64Image", Type, 17, ""},
+ {"Rect", Func, 0, "func(x0 int, y0 int, x1 int, y1 int) Rectangle"},
+ {"Rectangle", Type, 0, ""},
+ {"Rectangle.Max", Field, 0, ""},
+ {"Rectangle.Min", Field, 0, ""},
+ {"RegisterFormat", Func, 0, "func(name string, magic string, decode func(io.Reader) (Image, error), decodeConfig func(io.Reader) (Config, error))"},
+ {"Transparent", Var, 0, ""},
+ {"Uniform", Type, 0, ""},
+ {"Uniform.C", Field, 0, ""},
+ {"White", Var, 0, ""},
+ {"YCbCr", Type, 0, ""},
+ {"YCbCr.CStride", Field, 0, ""},
+ {"YCbCr.Cb", Field, 0, ""},
+ {"YCbCr.Cr", Field, 0, ""},
+ {"YCbCr.Rect", Field, 0, ""},
+ {"YCbCr.SubsampleRatio", Field, 0, ""},
+ {"YCbCr.Y", Field, 0, ""},
+ {"YCbCr.YStride", Field, 0, ""},
+ {"YCbCrSubsampleRatio", Type, 0, ""},
+ {"YCbCrSubsampleRatio410", Const, 5, ""},
+ {"YCbCrSubsampleRatio411", Const, 5, ""},
+ {"YCbCrSubsampleRatio420", Const, 0, ""},
+ {"YCbCrSubsampleRatio422", Const, 0, ""},
+ {"YCbCrSubsampleRatio440", Const, 1, ""},
+ {"YCbCrSubsampleRatio444", Const, 0, ""},
+ {"ZP", Var, 0, ""},
+ {"ZR", Var, 0, ""},
+ },
+ "image/color": {
+ {"(Alpha).RGBA", Method, 0, ""},
+ {"(Alpha16).RGBA", Method, 0, ""},
+ {"(CMYK).RGBA", Method, 5, ""},
+ {"(Gray).RGBA", Method, 0, ""},
+ {"(Gray16).RGBA", Method, 0, ""},
+ {"(NRGBA).RGBA", Method, 0, ""},
+ {"(NRGBA64).RGBA", Method, 0, ""},
+ {"(NYCbCrA).RGBA", Method, 6, ""},
+ {"(Palette).Convert", Method, 0, ""},
+ {"(Palette).Index", Method, 0, ""},
+ {"(RGBA).RGBA", Method, 0, ""},
+ {"(RGBA64).RGBA", Method, 0, ""},
+ {"(YCbCr).RGBA", Method, 0, ""},
+ {"Alpha", Type, 0, ""},
+ {"Alpha.A", Field, 0, ""},
+ {"Alpha16", Type, 0, ""},
+ {"Alpha16.A", Field, 0, ""},
+ {"Alpha16Model", Var, 0, ""},
+ {"AlphaModel", Var, 0, ""},
+ {"Black", Var, 0, ""},
+ {"CMYK", Type, 5, ""},
+ {"CMYK.C", Field, 5, ""},
+ {"CMYK.K", Field, 5, ""},
+ {"CMYK.M", Field, 5, ""},
+ {"CMYK.Y", Field, 5, ""},
+ {"CMYKModel", Var, 5, ""},
+ {"CMYKToRGB", Func, 5, "func(c uint8, m uint8, y uint8, k uint8) (uint8, uint8, uint8)"},
+ {"Color", Type, 0, ""},
+ {"Gray", Type, 0, ""},
+ {"Gray.Y", Field, 0, ""},
+ {"Gray16", Type, 0, ""},
+ {"Gray16.Y", Field, 0, ""},
+ {"Gray16Model", Var, 0, ""},
+ {"GrayModel", Var, 0, ""},
+ {"Model", Type, 0, ""},
+ {"ModelFunc", Func, 0, "func(f func(Color) Color) Model"},
+ {"NRGBA", Type, 0, ""},
+ {"NRGBA.A", Field, 0, ""},
+ {"NRGBA.B", Field, 0, ""},
+ {"NRGBA.G", Field, 0, ""},
+ {"NRGBA.R", Field, 0, ""},
+ {"NRGBA64", Type, 0, ""},
+ {"NRGBA64.A", Field, 0, ""},
+ {"NRGBA64.B", Field, 0, ""},
+ {"NRGBA64.G", Field, 0, ""},
+ {"NRGBA64.R", Field, 0, ""},
+ {"NRGBA64Model", Var, 0, ""},
+ {"NRGBAModel", Var, 0, ""},
+ {"NYCbCrA", Type, 6, ""},
+ {"NYCbCrA.A", Field, 6, ""},
+ {"NYCbCrA.YCbCr", Field, 6, ""},
+ {"NYCbCrAModel", Var, 6, ""},
+ {"Opaque", Var, 0, ""},
+ {"Palette", Type, 0, ""},
+ {"RGBA", Type, 0, ""},
+ {"RGBA.A", Field, 0, ""},
+ {"RGBA.B", Field, 0, ""},
+ {"RGBA.G", Field, 0, ""},
+ {"RGBA.R", Field, 0, ""},
+ {"RGBA64", Type, 0, ""},
+ {"RGBA64.A", Field, 0, ""},
+ {"RGBA64.B", Field, 0, ""},
+ {"RGBA64.G", Field, 0, ""},
+ {"RGBA64.R", Field, 0, ""},
+ {"RGBA64Model", Var, 0, ""},
+ {"RGBAModel", Var, 0, ""},
+ {"RGBToCMYK", Func, 5, "func(r uint8, g uint8, b uint8) (uint8, uint8, uint8, uint8)"},
+ {"RGBToYCbCr", Func, 0, "func(r uint8, g uint8, b uint8) (uint8, uint8, uint8)"},
+ {"Transparent", Var, 0, ""},
+ {"White", Var, 0, ""},
+ {"YCbCr", Type, 0, ""},
+ {"YCbCr.Cb", Field, 0, ""},
+ {"YCbCr.Cr", Field, 0, ""},
+ {"YCbCr.Y", Field, 0, ""},
+ {"YCbCrModel", Var, 0, ""},
+ {"YCbCrToRGB", Func, 0, "func(y uint8, cb uint8, cr uint8) (uint8, uint8, uint8)"},
+ },
+ "image/color/palette": {
+ {"Plan9", Var, 2, ""},
+ {"WebSafe", Var, 2, ""},
+ },
+ "image/draw": {
+ {"(Op).Draw", Method, 2, ""},
+ {"Draw", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, op Op)"},
+ {"DrawMask", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op)"},
+ {"Drawer", Type, 2, ""},
+ {"FloydSteinberg", Var, 2, ""},
+ {"Image", Type, 0, ""},
+ {"Op", Type, 0, ""},
+ {"Over", Const, 0, ""},
+ {"Quantizer", Type, 2, ""},
+ {"RGBA64Image", Type, 17, ""},
+ {"Src", Const, 0, ""},
+ },
+ "image/gif": {
+ {"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
+ {"DecodeAll", Func, 0, "func(r io.Reader) (*GIF, error)"},
+ {"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
+ {"DisposalBackground", Const, 5, ""},
+ {"DisposalNone", Const, 5, ""},
+ {"DisposalPrevious", Const, 5, ""},
+ {"Encode", Func, 2, "func(w io.Writer, m image.Image, o *Options) error"},
+ {"EncodeAll", Func, 2, "func(w io.Writer, g *GIF) error"},
+ {"GIF", Type, 0, ""},
+ {"GIF.BackgroundIndex", Field, 5, ""},
+ {"GIF.Config", Field, 5, ""},
+ {"GIF.Delay", Field, 0, ""},
+ {"GIF.Disposal", Field, 5, ""},
+ {"GIF.Image", Field, 0, ""},
+ {"GIF.LoopCount", Field, 0, ""},
+ {"Options", Type, 2, ""},
+ {"Options.Drawer", Field, 2, ""},
+ {"Options.NumColors", Field, 2, ""},
+ {"Options.Quantizer", Field, 2, ""},
+ },
+ "image/jpeg": {
+ {"(FormatError).Error", Method, 0, ""},
+ {"(UnsupportedError).Error", Method, 0, ""},
+ {"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
+ {"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
+ {"DefaultQuality", Const, 0, ""},
+ {"Encode", Func, 0, "func(w io.Writer, m image.Image, o *Options) error"},
+ {"FormatError", Type, 0, ""},
+ {"Options", Type, 0, ""},
+ {"Options.Quality", Field, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"UnsupportedError", Type, 0, ""},
+ },
+ "image/png": {
+ {"(*Encoder).Encode", Method, 4, ""},
+ {"(FormatError).Error", Method, 0, ""},
+ {"(UnsupportedError).Error", Method, 0, ""},
+ {"BestCompression", Const, 4, ""},
+ {"BestSpeed", Const, 4, ""},
+ {"CompressionLevel", Type, 4, ""},
+ {"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
+ {"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
+ {"DefaultCompression", Const, 4, ""},
+ {"Encode", Func, 0, "func(w io.Writer, m image.Image) error"},
+ {"Encoder", Type, 4, ""},
+ {"Encoder.BufferPool", Field, 9, ""},
+ {"Encoder.CompressionLevel", Field, 4, ""},
+ {"EncoderBuffer", Type, 9, ""},
+ {"EncoderBufferPool", Type, 9, ""},
+ {"FormatError", Type, 0, ""},
+ {"NoCompression", Const, 4, ""},
+ {"UnsupportedError", Type, 0, ""},
+ },
+ "index/suffixarray": {
+ {"(*Index).Bytes", Method, 0, ""},
+ {"(*Index).FindAllIndex", Method, 0, ""},
+ {"(*Index).Lookup", Method, 0, ""},
+ {"(*Index).Read", Method, 0, ""},
+ {"(*Index).Write", Method, 0, ""},
+ {"Index", Type, 0, ""},
+ {"New", Func, 0, "func(data []byte) *Index"},
+ },
+ "io": {
+ {"(*LimitedReader).Read", Method, 0, ""},
+ {"(*OffsetWriter).Seek", Method, 20, ""},
+ {"(*OffsetWriter).Write", Method, 20, ""},
+ {"(*OffsetWriter).WriteAt", Method, 20, ""},
+ {"(*PipeReader).Close", Method, 0, ""},
+ {"(*PipeReader).CloseWithError", Method, 0, ""},
+ {"(*PipeReader).Read", Method, 0, ""},
+ {"(*PipeWriter).Close", Method, 0, ""},
+ {"(*PipeWriter).CloseWithError", Method, 0, ""},
+ {"(*PipeWriter).Write", Method, 0, ""},
+ {"(*SectionReader).Outer", Method, 22, ""},
+ {"(*SectionReader).Read", Method, 0, ""},
+ {"(*SectionReader).ReadAt", Method, 0, ""},
+ {"(*SectionReader).Seek", Method, 0, ""},
+ {"(*SectionReader).Size", Method, 0, ""},
+ {"ByteReader", Type, 0, ""},
+ {"ByteScanner", Type, 0, ""},
+ {"ByteWriter", Type, 1, ""},
+ {"Closer", Type, 0, ""},
+ {"Copy", Func, 0, "func(dst Writer, src Reader) (written int64, err error)"},
+ {"CopyBuffer", Func, 5, "func(dst Writer, src Reader, buf []byte) (written int64, err error)"},
+ {"CopyN", Func, 0, "func(dst Writer, src Reader, n int64) (written int64, err error)"},
+ {"Discard", Var, 16, ""},
+ {"EOF", Var, 0, ""},
+ {"ErrClosedPipe", Var, 0, ""},
+ {"ErrNoProgress", Var, 1, ""},
+ {"ErrShortBuffer", Var, 0, ""},
+ {"ErrShortWrite", Var, 0, ""},
+ {"ErrUnexpectedEOF", Var, 0, ""},
+ {"LimitReader", Func, 0, "func(r Reader, n int64) Reader"},
+ {"LimitedReader", Type, 0, ""},
+ {"LimitedReader.N", Field, 0, ""},
+ {"LimitedReader.R", Field, 0, ""},
+ {"MultiReader", Func, 0, "func(readers ...Reader) Reader"},
+ {"MultiWriter", Func, 0, "func(writers ...Writer) Writer"},
+ {"NewOffsetWriter", Func, 20, "func(w WriterAt, off int64) *OffsetWriter"},
+ {"NewSectionReader", Func, 0, "func(r ReaderAt, off int64, n int64) *SectionReader"},
+ {"NopCloser", Func, 16, "func(r Reader) ReadCloser"},
+ {"OffsetWriter", Type, 20, ""},
+ {"Pipe", Func, 0, "func() (*PipeReader, *PipeWriter)"},
+ {"PipeReader", Type, 0, ""},
+ {"PipeWriter", Type, 0, ""},
+ {"ReadAll", Func, 16, "func(r Reader) ([]byte, error)"},
+ {"ReadAtLeast", Func, 0, "func(r Reader, buf []byte, min int) (n int, err error)"},
+ {"ReadCloser", Type, 0, ""},
+ {"ReadFull", Func, 0, "func(r Reader, buf []byte) (n int, err error)"},
+ {"ReadSeekCloser", Type, 16, ""},
+ {"ReadSeeker", Type, 0, ""},
+ {"ReadWriteCloser", Type, 0, ""},
+ {"ReadWriteSeeker", Type, 0, ""},
+ {"ReadWriter", Type, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"ReaderAt", Type, 0, ""},
+ {"ReaderFrom", Type, 0, ""},
+ {"RuneReader", Type, 0, ""},
+ {"RuneScanner", Type, 0, ""},
+ {"SectionReader", Type, 0, ""},
+ {"SeekCurrent", Const, 7, ""},
+ {"SeekEnd", Const, 7, ""},
+ {"SeekStart", Const, 7, ""},
+ {"Seeker", Type, 0, ""},
+ {"StringWriter", Type, 12, ""},
+ {"TeeReader", Func, 0, "func(r Reader, w Writer) Reader"},
+ {"WriteCloser", Type, 0, ""},
+ {"WriteSeeker", Type, 0, ""},
+ {"WriteString", Func, 0, "func(w Writer, s string) (n int, err error)"},
+ {"Writer", Type, 0, ""},
+ {"WriterAt", Type, 0, ""},
+ {"WriterTo", Type, 0, ""},
+ },
+ "io/fs": {
+ {"(*PathError).Error", Method, 16, ""},
+ {"(*PathError).Timeout", Method, 16, ""},
+ {"(*PathError).Unwrap", Method, 16, ""},
+ {"(FileMode).IsDir", Method, 16, ""},
+ {"(FileMode).IsRegular", Method, 16, ""},
+ {"(FileMode).Perm", Method, 16, ""},
+ {"(FileMode).String", Method, 16, ""},
+ {"(FileMode).Type", Method, 16, ""},
+ {"DirEntry", Type, 16, ""},
+ {"ErrClosed", Var, 16, ""},
+ {"ErrExist", Var, 16, ""},
+ {"ErrInvalid", Var, 16, ""},
+ {"ErrNotExist", Var, 16, ""},
+ {"ErrPermission", Var, 16, ""},
+ {"FS", Type, 16, ""},
+ {"File", Type, 16, ""},
+ {"FileInfo", Type, 16, ""},
+ {"FileInfoToDirEntry", Func, 17, "func(info FileInfo) DirEntry"},
+ {"FileMode", Type, 16, ""},
+ {"FormatDirEntry", Func, 21, "func(dir DirEntry) string"},
+ {"FormatFileInfo", Func, 21, "func(info FileInfo) string"},
+ {"Glob", Func, 16, "func(fsys FS, pattern string) (matches []string, err error)"},
+ {"GlobFS", Type, 16, ""},
+ {"Lstat", Func, 25, "func(fsys FS, name string) (FileInfo, error)"},
+ {"ModeAppend", Const, 16, ""},
+ {"ModeCharDevice", Const, 16, ""},
+ {"ModeDevice", Const, 16, ""},
+ {"ModeDir", Const, 16, ""},
+ {"ModeExclusive", Const, 16, ""},
+ {"ModeIrregular", Const, 16, ""},
+ {"ModeNamedPipe", Const, 16, ""},
+ {"ModePerm", Const, 16, ""},
+ {"ModeSetgid", Const, 16, ""},
+ {"ModeSetuid", Const, 16, ""},
+ {"ModeSocket", Const, 16, ""},
+ {"ModeSticky", Const, 16, ""},
+ {"ModeSymlink", Const, 16, ""},
+ {"ModeTemporary", Const, 16, ""},
+ {"ModeType", Const, 16, ""},
+ {"PathError", Type, 16, ""},
+ {"PathError.Err", Field, 16, ""},
+ {"PathError.Op", Field, 16, ""},
+ {"PathError.Path", Field, 16, ""},
+ {"ReadDir", Func, 16, "func(fsys FS, name string) ([]DirEntry, error)"},
+ {"ReadDirFS", Type, 16, ""},
+ {"ReadDirFile", Type, 16, ""},
+ {"ReadFile", Func, 16, "func(fsys FS, name string) ([]byte, error)"},
+ {"ReadFileFS", Type, 16, ""},
+ {"ReadLink", Func, 25, "func(fsys FS, name string) (string, error)"},
+ {"ReadLinkFS", Type, 25, ""},
+ {"SkipAll", Var, 20, ""},
+ {"SkipDir", Var, 16, ""},
+ {"Stat", Func, 16, "func(fsys FS, name string) (FileInfo, error)"},
+ {"StatFS", Type, 16, ""},
+ {"Sub", Func, 16, "func(fsys FS, dir string) (FS, error)"},
+ {"SubFS", Type, 16, ""},
+ {"ValidPath", Func, 16, "func(name string) bool"},
+ {"WalkDir", Func, 16, "func(fsys FS, root string, fn WalkDirFunc) error"},
+ {"WalkDirFunc", Type, 16, ""},
+ },
+ "io/ioutil": {
+ {"Discard", Var, 0, ""},
+ {"NopCloser", Func, 0, "func(r io.Reader) io.ReadCloser"},
+ {"ReadAll", Func, 0, "func(r io.Reader) ([]byte, error)"},
+ {"ReadDir", Func, 0, "func(dirname string) ([]fs.FileInfo, error)"},
+ {"ReadFile", Func, 0, "func(filename string) ([]byte, error)"},
+ {"TempDir", Func, 0, "func(dir string, pattern string) (name string, err error)"},
+ {"TempFile", Func, 0, "func(dir string, pattern string) (f *os.File, err error)"},
+ {"WriteFile", Func, 0, "func(filename string, data []byte, perm fs.FileMode) error"},
+ },
+ "iter": {
+ {"Pull", Func, 23, "func[V any](seq Seq[V]) (next func() (V, bool), stop func())"},
+ {"Pull2", Func, 23, "func[K, V any](seq Seq2[K, V]) (next func() (K, V, bool), stop func())"},
+ {"Seq", Type, 23, ""},
+ {"Seq2", Type, 23, ""},
+ },
+ "log": {
+ {"(*Logger).Fatal", Method, 0, ""},
+ {"(*Logger).Fatalf", Method, 0, ""},
+ {"(*Logger).Fatalln", Method, 0, ""},
+ {"(*Logger).Flags", Method, 0, ""},
+ {"(*Logger).Output", Method, 0, ""},
+ {"(*Logger).Panic", Method, 0, ""},
+ {"(*Logger).Panicf", Method, 0, ""},
+ {"(*Logger).Panicln", Method, 0, ""},
+ {"(*Logger).Prefix", Method, 0, ""},
+ {"(*Logger).Print", Method, 0, ""},
+ {"(*Logger).Printf", Method, 0, ""},
+ {"(*Logger).Println", Method, 0, ""},
+ {"(*Logger).SetFlags", Method, 0, ""},
+ {"(*Logger).SetOutput", Method, 5, ""},
+ {"(*Logger).SetPrefix", Method, 0, ""},
+ {"(*Logger).Writer", Method, 12, ""},
+ {"Default", Func, 16, "func() *Logger"},
+ {"Fatal", Func, 0, "func(v ...any)"},
+ {"Fatalf", Func, 0, "func(format string, v ...any)"},
+ {"Fatalln", Func, 0, "func(v ...any)"},
+ {"Flags", Func, 0, "func() int"},
+ {"LUTC", Const, 5, ""},
+ {"Ldate", Const, 0, ""},
+ {"Llongfile", Const, 0, ""},
+ {"Lmicroseconds", Const, 0, ""},
+ {"Lmsgprefix", Const, 14, ""},
+ {"Logger", Type, 0, ""},
+ {"Lshortfile", Const, 0, ""},
+ {"LstdFlags", Const, 0, ""},
+ {"Ltime", Const, 0, ""},
+ {"New", Func, 0, "func(out io.Writer, prefix string, flag int) *Logger"},
+ {"Output", Func, 5, "func(calldepth int, s string) error"},
+ {"Panic", Func, 0, "func(v ...any)"},
+ {"Panicf", Func, 0, "func(format string, v ...any)"},
+ {"Panicln", Func, 0, "func(v ...any)"},
+ {"Prefix", Func, 0, "func() string"},
+ {"Print", Func, 0, "func(v ...any)"},
+ {"Printf", Func, 0, "func(format string, v ...any)"},
+ {"Println", Func, 0, "func(v ...any)"},
+ {"SetFlags", Func, 0, "func(flag int)"},
+ {"SetOutput", Func, 0, "func(w io.Writer)"},
+ {"SetPrefix", Func, 0, "func(prefix string)"},
+ {"Writer", Func, 13, "func() io.Writer"},
+ },
+ "log/slog": {
+ {"(*JSONHandler).Enabled", Method, 21, ""},
+ {"(*JSONHandler).Handle", Method, 21, ""},
+ {"(*JSONHandler).WithAttrs", Method, 21, ""},
+ {"(*JSONHandler).WithGroup", Method, 21, ""},
+ {"(*Level).UnmarshalJSON", Method, 21, ""},
+ {"(*Level).UnmarshalText", Method, 21, ""},
+ {"(*LevelVar).AppendText", Method, 24, ""},
+ {"(*LevelVar).Level", Method, 21, ""},
+ {"(*LevelVar).MarshalText", Method, 21, ""},
+ {"(*LevelVar).Set", Method, 21, ""},
+ {"(*LevelVar).String", Method, 21, ""},
+ {"(*LevelVar).UnmarshalText", Method, 21, ""},
+ {"(*Logger).Debug", Method, 21, ""},
+ {"(*Logger).DebugContext", Method, 21, ""},
+ {"(*Logger).Enabled", Method, 21, ""},
+ {"(*Logger).Error", Method, 21, ""},
+ {"(*Logger).ErrorContext", Method, 21, ""},
+ {"(*Logger).Handler", Method, 21, ""},
+ {"(*Logger).Info", Method, 21, ""},
+ {"(*Logger).InfoContext", Method, 21, ""},
+ {"(*Logger).Log", Method, 21, ""},
+ {"(*Logger).LogAttrs", Method, 21, ""},
+ {"(*Logger).Warn", Method, 21, ""},
+ {"(*Logger).WarnContext", Method, 21, ""},
+ {"(*Logger).With", Method, 21, ""},
+ {"(*Logger).WithGroup", Method, 21, ""},
+ {"(*Record).Add", Method, 21, ""},
+ {"(*Record).AddAttrs", Method, 21, ""},
+ {"(*TextHandler).Enabled", Method, 21, ""},
+ {"(*TextHandler).Handle", Method, 21, ""},
+ {"(*TextHandler).WithAttrs", Method, 21, ""},
+ {"(*TextHandler).WithGroup", Method, 21, ""},
+ {"(Attr).Equal", Method, 21, ""},
+ {"(Attr).String", Method, 21, ""},
+ {"(Kind).String", Method, 21, ""},
+ {"(Level).AppendText", Method, 24, ""},
+ {"(Level).Level", Method, 21, ""},
+ {"(Level).MarshalJSON", Method, 21, ""},
+ {"(Level).MarshalText", Method, 21, ""},
+ {"(Level).String", Method, 21, ""},
+ {"(Record).Attrs", Method, 21, ""},
+ {"(Record).Clone", Method, 21, ""},
+ {"(Record).NumAttrs", Method, 21, ""},
+ {"(Record).Source", Method, 25, ""},
+ {"(Value).Any", Method, 21, ""},
+ {"(Value).Bool", Method, 21, ""},
+ {"(Value).Duration", Method, 21, ""},
+ {"(Value).Equal", Method, 21, ""},
+ {"(Value).Float64", Method, 21, ""},
+ {"(Value).Group", Method, 21, ""},
+ {"(Value).Int64", Method, 21, ""},
+ {"(Value).Kind", Method, 21, ""},
+ {"(Value).LogValuer", Method, 21, ""},
+ {"(Value).Resolve", Method, 21, ""},
+ {"(Value).String", Method, 21, ""},
+ {"(Value).Time", Method, 21, ""},
+ {"(Value).Uint64", Method, 21, ""},
+ {"Any", Func, 21, "func(key string, value any) Attr"},
+ {"AnyValue", Func, 21, "func(v any) Value"},
+ {"Attr", Type, 21, ""},
+ {"Attr.Key", Field, 21, ""},
+ {"Attr.Value", Field, 21, ""},
+ {"Bool", Func, 21, "func(key string, v bool) Attr"},
+ {"BoolValue", Func, 21, "func(v bool) Value"},
+ {"Debug", Func, 21, "func(msg string, args ...any)"},
+ {"DebugContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
+ {"Default", Func, 21, "func() *Logger"},
+ {"DiscardHandler", Var, 24, ""},
+ {"Duration", Func, 21, "func(key string, v time.Duration) Attr"},
+ {"DurationValue", Func, 21, "func(v time.Duration) Value"},
+ {"Error", Func, 21, "func(msg string, args ...any)"},
+ {"ErrorContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
+ {"Float64", Func, 21, "func(key string, v float64) Attr"},
+ {"Float64Value", Func, 21, "func(v float64) Value"},
+ {"Group", Func, 21, "func(key string, args ...any) Attr"},
+ {"GroupAttrs", Func, 25, "func(key string, attrs ...Attr) Attr"},
+ {"GroupValue", Func, 21, "func(as ...Attr) Value"},
+ {"Handler", Type, 21, ""},
+ {"HandlerOptions", Type, 21, ""},
+ {"HandlerOptions.AddSource", Field, 21, ""},
+ {"HandlerOptions.Level", Field, 21, ""},
+ {"HandlerOptions.ReplaceAttr", Field, 21, ""},
+ {"Info", Func, 21, "func(msg string, args ...any)"},
+ {"InfoContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
+ {"Int", Func, 21, "func(key string, value int) Attr"},
+ {"Int64", Func, 21, "func(key string, value int64) Attr"},
+ {"Int64Value", Func, 21, "func(v int64) Value"},
+ {"IntValue", Func, 21, "func(v int) Value"},
+ {"JSONHandler", Type, 21, ""},
+ {"Kind", Type, 21, ""},
+ {"KindAny", Const, 21, ""},
+ {"KindBool", Const, 21, ""},
+ {"KindDuration", Const, 21, ""},
+ {"KindFloat64", Const, 21, ""},
+ {"KindGroup", Const, 21, ""},
+ {"KindInt64", Const, 21, ""},
+ {"KindLogValuer", Const, 21, ""},
+ {"KindString", Const, 21, ""},
+ {"KindTime", Const, 21, ""},
+ {"KindUint64", Const, 21, ""},
+ {"Level", Type, 21, ""},
+ {"LevelDebug", Const, 21, ""},
+ {"LevelError", Const, 21, ""},
+ {"LevelInfo", Const, 21, ""},
+ {"LevelKey", Const, 21, ""},
+ {"LevelVar", Type, 21, ""},
+ {"LevelWarn", Const, 21, ""},
+ {"Leveler", Type, 21, ""},
+ {"Log", Func, 21, "func(ctx context.Context, level Level, msg string, args ...any)"},
+ {"LogAttrs", Func, 21, "func(ctx context.Context, level Level, msg string, attrs ...Attr)"},
+ {"LogValuer", Type, 21, ""},
+ {"Logger", Type, 21, ""},
+ {"MessageKey", Const, 21, ""},
+ {"New", Func, 21, "func(h Handler) *Logger"},
+ {"NewJSONHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *JSONHandler"},
+ {"NewLogLogger", Func, 21, "func(h Handler, level Level) *log.Logger"},
+ {"NewRecord", Func, 21, "func(t time.Time, level Level, msg string, pc uintptr) Record"},
+ {"NewTextHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *TextHandler"},
+ {"Record", Type, 21, ""},
+ {"Record.Level", Field, 21, ""},
+ {"Record.Message", Field, 21, ""},
+ {"Record.PC", Field, 21, ""},
+ {"Record.Time", Field, 21, ""},
+ {"SetDefault", Func, 21, "func(l *Logger)"},
+ {"SetLogLoggerLevel", Func, 22, "func(level Level) (oldLevel Level)"},
+ {"Source", Type, 21, ""},
+ {"Source.File", Field, 21, ""},
+ {"Source.Function", Field, 21, ""},
+ {"Source.Line", Field, 21, ""},
+ {"SourceKey", Const, 21, ""},
+ {"String", Func, 21, "func(key string, value string) Attr"},
+ {"StringValue", Func, 21, "func(value string) Value"},
+ {"TextHandler", Type, 21, ""},
+ {"Time", Func, 21, "func(key string, v time.Time) Attr"},
+ {"TimeKey", Const, 21, ""},
+ {"TimeValue", Func, 21, "func(v time.Time) Value"},
+ {"Uint64", Func, 21, "func(key string, v uint64) Attr"},
+ {"Uint64Value", Func, 21, "func(v uint64) Value"},
+ {"Value", Type, 21, ""},
+ {"Warn", Func, 21, "func(msg string, args ...any)"},
+ {"WarnContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
+ {"With", Func, 21, "func(args ...any) *Logger"},
+ },
+ "log/syslog": {
+ {"(*Writer).Alert", Method, 0, ""},
+ {"(*Writer).Close", Method, 0, ""},
+ {"(*Writer).Crit", Method, 0, ""},
+ {"(*Writer).Debug", Method, 0, ""},
+ {"(*Writer).Emerg", Method, 0, ""},
+ {"(*Writer).Err", Method, 0, ""},
+ {"(*Writer).Info", Method, 0, ""},
+ {"(*Writer).Notice", Method, 0, ""},
+ {"(*Writer).Warning", Method, 0, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"Dial", Func, 0, "func(network string, raddr string, priority Priority, tag string) (*Writer, error)"},
+ {"LOG_ALERT", Const, 0, ""},
+ {"LOG_AUTH", Const, 1, ""},
+ {"LOG_AUTHPRIV", Const, 1, ""},
+ {"LOG_CRIT", Const, 0, ""},
+ {"LOG_CRON", Const, 1, ""},
+ {"LOG_DAEMON", Const, 1, ""},
+ {"LOG_DEBUG", Const, 0, ""},
+ {"LOG_EMERG", Const, 0, ""},
+ {"LOG_ERR", Const, 0, ""},
+ {"LOG_FTP", Const, 1, ""},
+ {"LOG_INFO", Const, 0, ""},
+ {"LOG_KERN", Const, 1, ""},
+ {"LOG_LOCAL0", Const, 1, ""},
+ {"LOG_LOCAL1", Const, 1, ""},
+ {"LOG_LOCAL2", Const, 1, ""},
+ {"LOG_LOCAL3", Const, 1, ""},
+ {"LOG_LOCAL4", Const, 1, ""},
+ {"LOG_LOCAL5", Const, 1, ""},
+ {"LOG_LOCAL6", Const, 1, ""},
+ {"LOG_LOCAL7", Const, 1, ""},
+ {"LOG_LPR", Const, 1, ""},
+ {"LOG_MAIL", Const, 1, ""},
+ {"LOG_NEWS", Const, 1, ""},
+ {"LOG_NOTICE", Const, 0, ""},
+ {"LOG_SYSLOG", Const, 1, ""},
+ {"LOG_USER", Const, 1, ""},
+ {"LOG_UUCP", Const, 1, ""},
+ {"LOG_WARNING", Const, 0, ""},
+ {"New", Func, 0, "func(priority Priority, tag string) (*Writer, error)"},
+ {"NewLogger", Func, 0, "func(p Priority, logFlag int) (*log.Logger, error)"},
+ {"Priority", Type, 0, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "maps": {
+ {"All", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq2[K, V]"},
+ {"Clone", Func, 21, "func[M ~map[K]V, K comparable, V any](m M) M"},
+ {"Collect", Func, 23, "func[K comparable, V any](seq iter.Seq2[K, V]) map[K]V"},
+ {"Copy", Func, 21, "func[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2)"},
+ {"DeleteFunc", Func, 21, "func[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool)"},
+ {"Equal", Func, 21, "func[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool"},
+ {"EqualFunc", Func, 21, "func[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool"},
+ {"Insert", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map, seq iter.Seq2[K, V])"},
+ {"Keys", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq[K]"},
+ {"Values", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq[V]"},
+ },
+ "math": {
+ {"Abs", Func, 0, "func(x float64) float64"},
+ {"Acos", Func, 0, "func(x float64) float64"},
+ {"Acosh", Func, 0, "func(x float64) float64"},
+ {"Asin", Func, 0, "func(x float64) float64"},
+ {"Asinh", Func, 0, "func(x float64) float64"},
+ {"Atan", Func, 0, "func(x float64) float64"},
+ {"Atan2", Func, 0, "func(y float64, x float64) float64"},
+ {"Atanh", Func, 0, "func(x float64) float64"},
+ {"Cbrt", Func, 0, "func(x float64) float64"},
+ {"Ceil", Func, 0, "func(x float64) float64"},
+ {"Copysign", Func, 0, "func(f float64, sign float64) float64"},
+ {"Cos", Func, 0, "func(x float64) float64"},
+ {"Cosh", Func, 0, "func(x float64) float64"},
+ {"Dim", Func, 0, "func(x float64, y float64) float64"},
+ {"E", Const, 0, ""},
+ {"Erf", Func, 0, "func(x float64) float64"},
+ {"Erfc", Func, 0, "func(x float64) float64"},
+ {"Erfcinv", Func, 10, "func(x float64) float64"},
+ {"Erfinv", Func, 10, "func(x float64) float64"},
+ {"Exp", Func, 0, "func(x float64) float64"},
+ {"Exp2", Func, 0, "func(x float64) float64"},
+ {"Expm1", Func, 0, "func(x float64) float64"},
+ {"FMA", Func, 14, "func(x float64, y float64, z float64) float64"},
+ {"Float32bits", Func, 0, "func(f float32) uint32"},
+ {"Float32frombits", Func, 0, "func(b uint32) float32"},
+ {"Float64bits", Func, 0, "func(f float64) uint64"},
+ {"Float64frombits", Func, 0, "func(b uint64) float64"},
+ {"Floor", Func, 0, "func(x float64) float64"},
+ {"Frexp", Func, 0, "func(f float64) (frac float64, exp int)"},
+ {"Gamma", Func, 0, "func(x float64) float64"},
+ {"Hypot", Func, 0, "func(p float64, q float64) float64"},
+ {"Ilogb", Func, 0, "func(x float64) int"},
+ {"Inf", Func, 0, "func(sign int) float64"},
+ {"IsInf", Func, 0, "func(f float64, sign int) bool"},
+ {"IsNaN", Func, 0, "func(f float64) (is bool)"},
+ {"J0", Func, 0, "func(x float64) float64"},
+ {"J1", Func, 0, "func(x float64) float64"},
+ {"Jn", Func, 0, "func(n int, x float64) float64"},
+ {"Ldexp", Func, 0, "func(frac float64, exp int) float64"},
+ {"Lgamma", Func, 0, "func(x float64) (lgamma float64, sign int)"},
+ {"Ln10", Const, 0, ""},
+ {"Ln2", Const, 0, ""},
+ {"Log", Func, 0, "func(x float64) float64"},
+ {"Log10", Func, 0, "func(x float64) float64"},
+ {"Log10E", Const, 0, ""},
+ {"Log1p", Func, 0, "func(x float64) float64"},
+ {"Log2", Func, 0, "func(x float64) float64"},
+ {"Log2E", Const, 0, ""},
+ {"Logb", Func, 0, "func(x float64) float64"},
+ {"Max", Func, 0, "func(x float64, y float64) float64"},
+ {"MaxFloat32", Const, 0, ""},
+ {"MaxFloat64", Const, 0, ""},
+ {"MaxInt", Const, 17, ""},
+ {"MaxInt16", Const, 0, ""},
+ {"MaxInt32", Const, 0, ""},
+ {"MaxInt64", Const, 0, ""},
+ {"MaxInt8", Const, 0, ""},
+ {"MaxUint", Const, 17, ""},
+ {"MaxUint16", Const, 0, ""},
+ {"MaxUint32", Const, 0, ""},
+ {"MaxUint64", Const, 0, ""},
+ {"MaxUint8", Const, 0, ""},
+ {"Min", Func, 0, "func(x float64, y float64) float64"},
+ {"MinInt", Const, 17, ""},
+ {"MinInt16", Const, 0, ""},
+ {"MinInt32", Const, 0, ""},
+ {"MinInt64", Const, 0, ""},
+ {"MinInt8", Const, 0, ""},
+ {"Mod", Func, 0, "func(x float64, y float64) float64"},
+ {"Modf", Func, 0, "func(f float64) (int float64, frac float64)"},
+ {"NaN", Func, 0, "func() float64"},
+ {"Nextafter", Func, 0, "func(x float64, y float64) (r float64)"},
+ {"Nextafter32", Func, 4, "func(x float32, y float32) (r float32)"},
+ {"Phi", Const, 0, ""},
+ {"Pi", Const, 0, ""},
+ {"Pow", Func, 0, "func(x float64, y float64) float64"},
+ {"Pow10", Func, 0, "func(n int) float64"},
+ {"Remainder", Func, 0, "func(x float64, y float64) float64"},
+ {"Round", Func, 10, "func(x float64) float64"},
+ {"RoundToEven", Func, 10, "func(x float64) float64"},
+ {"Signbit", Func, 0, "func(x float64) bool"},
+ {"Sin", Func, 0, "func(x float64) float64"},
+ {"Sincos", Func, 0, "func(x float64) (sin float64, cos float64)"},
+ {"Sinh", Func, 0, "func(x float64) float64"},
+ {"SmallestNonzeroFloat32", Const, 0, ""},
+ {"SmallestNonzeroFloat64", Const, 0, ""},
+ {"Sqrt", Func, 0, "func(x float64) float64"},
+ {"Sqrt2", Const, 0, ""},
+ {"SqrtE", Const, 0, ""},
+ {"SqrtPhi", Const, 0, ""},
+ {"SqrtPi", Const, 0, ""},
+ {"Tan", Func, 0, "func(x float64) float64"},
+ {"Tanh", Func, 0, "func(x float64) float64"},
+ {"Trunc", Func, 0, "func(x float64) float64"},
+ {"Y0", Func, 0, "func(x float64) float64"},
+ {"Y1", Func, 0, "func(x float64) float64"},
+ {"Yn", Func, 0, "func(n int, x float64) float64"},
+ },
+ "math/big": {
+ {"(*Float).Abs", Method, 5, ""},
+ {"(*Float).Acc", Method, 5, ""},
+ {"(*Float).Add", Method, 5, ""},
+ {"(*Float).Append", Method, 5, ""},
+ {"(*Float).AppendText", Method, 24, ""},
+ {"(*Float).Cmp", Method, 5, ""},
+ {"(*Float).Copy", Method, 5, ""},
+ {"(*Float).Float32", Method, 5, ""},
+ {"(*Float).Float64", Method, 5, ""},
+ {"(*Float).Format", Method, 5, ""},
+ {"(*Float).GobDecode", Method, 7, ""},
+ {"(*Float).GobEncode", Method, 7, ""},
+ {"(*Float).Int", Method, 5, ""},
+ {"(*Float).Int64", Method, 5, ""},
+ {"(*Float).IsInf", Method, 5, ""},
+ {"(*Float).IsInt", Method, 5, ""},
+ {"(*Float).MantExp", Method, 5, ""},
+ {"(*Float).MarshalText", Method, 6, ""},
+ {"(*Float).MinPrec", Method, 5, ""},
+ {"(*Float).Mode", Method, 5, ""},
+ {"(*Float).Mul", Method, 5, ""},
+ {"(*Float).Neg", Method, 5, ""},
+ {"(*Float).Parse", Method, 5, ""},
+ {"(*Float).Prec", Method, 5, ""},
+ {"(*Float).Quo", Method, 5, ""},
+ {"(*Float).Rat", Method, 5, ""},
+ {"(*Float).Scan", Method, 8, ""},
+ {"(*Float).Set", Method, 5, ""},
+ {"(*Float).SetFloat64", Method, 5, ""},
+ {"(*Float).SetInf", Method, 5, ""},
+ {"(*Float).SetInt", Method, 5, ""},
+ {"(*Float).SetInt64", Method, 5, ""},
+ {"(*Float).SetMantExp", Method, 5, ""},
+ {"(*Float).SetMode", Method, 5, ""},
+ {"(*Float).SetPrec", Method, 5, ""},
+ {"(*Float).SetRat", Method, 5, ""},
+ {"(*Float).SetString", Method, 5, ""},
+ {"(*Float).SetUint64", Method, 5, ""},
+ {"(*Float).Sign", Method, 5, ""},
+ {"(*Float).Signbit", Method, 5, ""},
+ {"(*Float).Sqrt", Method, 10, ""},
+ {"(*Float).String", Method, 5, ""},
+ {"(*Float).Sub", Method, 5, ""},
+ {"(*Float).Text", Method, 5, ""},
+ {"(*Float).Uint64", Method, 5, ""},
+ {"(*Float).UnmarshalText", Method, 6, ""},
+ {"(*Int).Abs", Method, 0, ""},
+ {"(*Int).Add", Method, 0, ""},
+ {"(*Int).And", Method, 0, ""},
+ {"(*Int).AndNot", Method, 0, ""},
+ {"(*Int).Append", Method, 6, ""},
+ {"(*Int).AppendText", Method, 24, ""},
+ {"(*Int).Binomial", Method, 0, ""},
+ {"(*Int).Bit", Method, 0, ""},
+ {"(*Int).BitLen", Method, 0, ""},
+ {"(*Int).Bits", Method, 0, ""},
+ {"(*Int).Bytes", Method, 0, ""},
+ {"(*Int).Cmp", Method, 0, ""},
+ {"(*Int).CmpAbs", Method, 10, ""},
+ {"(*Int).Div", Method, 0, ""},
+ {"(*Int).DivMod", Method, 0, ""},
+ {"(*Int).Exp", Method, 0, ""},
+ {"(*Int).FillBytes", Method, 15, ""},
+ {"(*Int).Float64", Method, 21, ""},
+ {"(*Int).Format", Method, 0, ""},
+ {"(*Int).GCD", Method, 0, ""},
+ {"(*Int).GobDecode", Method, 0, ""},
+ {"(*Int).GobEncode", Method, 0, ""},
+ {"(*Int).Int64", Method, 0, ""},
+ {"(*Int).IsInt64", Method, 9, ""},
+ {"(*Int).IsUint64", Method, 9, ""},
+ {"(*Int).Lsh", Method, 0, ""},
+ {"(*Int).MarshalJSON", Method, 1, ""},
+ {"(*Int).MarshalText", Method, 3, ""},
+ {"(*Int).Mod", Method, 0, ""},
+ {"(*Int).ModInverse", Method, 0, ""},
+ {"(*Int).ModSqrt", Method, 5, ""},
+ {"(*Int).Mul", Method, 0, ""},
+ {"(*Int).MulRange", Method, 0, ""},
+ {"(*Int).Neg", Method, 0, ""},
+ {"(*Int).Not", Method, 0, ""},
+ {"(*Int).Or", Method, 0, ""},
+ {"(*Int).ProbablyPrime", Method, 0, ""},
+ {"(*Int).Quo", Method, 0, ""},
+ {"(*Int).QuoRem", Method, 0, ""},
+ {"(*Int).Rand", Method, 0, ""},
+ {"(*Int).Rem", Method, 0, ""},
+ {"(*Int).Rsh", Method, 0, ""},
+ {"(*Int).Scan", Method, 0, ""},
+ {"(*Int).Set", Method, 0, ""},
+ {"(*Int).SetBit", Method, 0, ""},
+ {"(*Int).SetBits", Method, 0, ""},
+ {"(*Int).SetBytes", Method, 0, ""},
+ {"(*Int).SetInt64", Method, 0, ""},
+ {"(*Int).SetString", Method, 0, ""},
+ {"(*Int).SetUint64", Method, 1, ""},
+ {"(*Int).Sign", Method, 0, ""},
+ {"(*Int).Sqrt", Method, 8, ""},
+ {"(*Int).String", Method, 0, ""},
+ {"(*Int).Sub", Method, 0, ""},
+ {"(*Int).Text", Method, 6, ""},
+ {"(*Int).TrailingZeroBits", Method, 13, ""},
+ {"(*Int).Uint64", Method, 1, ""},
+ {"(*Int).UnmarshalJSON", Method, 1, ""},
+ {"(*Int).UnmarshalText", Method, 3, ""},
+ {"(*Int).Xor", Method, 0, ""},
+ {"(*Rat).Abs", Method, 0, ""},
+ {"(*Rat).Add", Method, 0, ""},
+ {"(*Rat).AppendText", Method, 24, ""},
+ {"(*Rat).Cmp", Method, 0, ""},
+ {"(*Rat).Denom", Method, 0, ""},
+ {"(*Rat).Float32", Method, 4, ""},
+ {"(*Rat).Float64", Method, 1, ""},
+ {"(*Rat).FloatPrec", Method, 22, ""},
+ {"(*Rat).FloatString", Method, 0, ""},
+ {"(*Rat).GobDecode", Method, 0, ""},
+ {"(*Rat).GobEncode", Method, 0, ""},
+ {"(*Rat).Inv", Method, 0, ""},
+ {"(*Rat).IsInt", Method, 0, ""},
+ {"(*Rat).MarshalText", Method, 3, ""},
+ {"(*Rat).Mul", Method, 0, ""},
+ {"(*Rat).Neg", Method, 0, ""},
+ {"(*Rat).Num", Method, 0, ""},
+ {"(*Rat).Quo", Method, 0, ""},
+ {"(*Rat).RatString", Method, 0, ""},
+ {"(*Rat).Scan", Method, 0, ""},
+ {"(*Rat).Set", Method, 0, ""},
+ {"(*Rat).SetFloat64", Method, 1, ""},
+ {"(*Rat).SetFrac", Method, 0, ""},
+ {"(*Rat).SetFrac64", Method, 0, ""},
+ {"(*Rat).SetInt", Method, 0, ""},
+ {"(*Rat).SetInt64", Method, 0, ""},
+ {"(*Rat).SetString", Method, 0, ""},
+ {"(*Rat).SetUint64", Method, 13, ""},
+ {"(*Rat).Sign", Method, 0, ""},
+ {"(*Rat).String", Method, 0, ""},
+ {"(*Rat).Sub", Method, 0, ""},
+ {"(*Rat).UnmarshalText", Method, 3, ""},
+ {"(Accuracy).String", Method, 5, ""},
+ {"(ErrNaN).Error", Method, 5, ""},
+ {"(RoundingMode).String", Method, 5, ""},
+ {"Above", Const, 5, ""},
+ {"Accuracy", Type, 5, ""},
+ {"AwayFromZero", Const, 5, ""},
+ {"Below", Const, 5, ""},
+ {"ErrNaN", Type, 5, ""},
+ {"Exact", Const, 5, ""},
+ {"Float", Type, 5, ""},
+ {"Int", Type, 0, ""},
+ {"Jacobi", Func, 5, "func(x *Int, y *Int) int"},
+ {"MaxBase", Const, 0, ""},
+ {"MaxExp", Const, 5, ""},
+ {"MaxPrec", Const, 5, ""},
+ {"MinExp", Const, 5, ""},
+ {"NewFloat", Func, 5, "func(x float64) *Float"},
+ {"NewInt", Func, 0, "func(x int64) *Int"},
+ {"NewRat", Func, 0, "func(a int64, b int64) *Rat"},
+ {"ParseFloat", Func, 5, "func(s string, base int, prec uint, mode RoundingMode) (f *Float, b int, err error)"},
+ {"Rat", Type, 0, ""},
+ {"RoundingMode", Type, 5, ""},
+ {"ToNearestAway", Const, 5, ""},
+ {"ToNearestEven", Const, 5, ""},
+ {"ToNegativeInf", Const, 5, ""},
+ {"ToPositiveInf", Const, 5, ""},
+ {"ToZero", Const, 5, ""},
+ {"Word", Type, 0, ""},
+ },
+ "math/bits": {
+ {"Add", Func, 12, "func(x uint, y uint, carry uint) (sum uint, carryOut uint)"},
+ {"Add32", Func, 12, "func(x uint32, y uint32, carry uint32) (sum uint32, carryOut uint32)"},
+ {"Add64", Func, 12, "func(x uint64, y uint64, carry uint64) (sum uint64, carryOut uint64)"},
+ {"Div", Func, 12, "func(hi uint, lo uint, y uint) (quo uint, rem uint)"},
+ {"Div32", Func, 12, "func(hi uint32, lo uint32, y uint32) (quo uint32, rem uint32)"},
+ {"Div64", Func, 12, "func(hi uint64, lo uint64, y uint64) (quo uint64, rem uint64)"},
+ {"LeadingZeros", Func, 9, "func(x uint) int"},
+ {"LeadingZeros16", Func, 9, "func(x uint16) int"},
+ {"LeadingZeros32", Func, 9, "func(x uint32) int"},
+ {"LeadingZeros64", Func, 9, "func(x uint64) int"},
+ {"LeadingZeros8", Func, 9, "func(x uint8) int"},
+ {"Len", Func, 9, "func(x uint) int"},
+ {"Len16", Func, 9, "func(x uint16) (n int)"},
+ {"Len32", Func, 9, "func(x uint32) (n int)"},
+ {"Len64", Func, 9, "func(x uint64) (n int)"},
+ {"Len8", Func, 9, "func(x uint8) int"},
+ {"Mul", Func, 12, "func(x uint, y uint) (hi uint, lo uint)"},
+ {"Mul32", Func, 12, "func(x uint32, y uint32) (hi uint32, lo uint32)"},
+ {"Mul64", Func, 12, "func(x uint64, y uint64) (hi uint64, lo uint64)"},
+ {"OnesCount", Func, 9, "func(x uint) int"},
+ {"OnesCount16", Func, 9, "func(x uint16) int"},
+ {"OnesCount32", Func, 9, "func(x uint32) int"},
+ {"OnesCount64", Func, 9, "func(x uint64) int"},
+ {"OnesCount8", Func, 9, "func(x uint8) int"},
+ {"Rem", Func, 14, "func(hi uint, lo uint, y uint) uint"},
+ {"Rem32", Func, 14, "func(hi uint32, lo uint32, y uint32) uint32"},
+ {"Rem64", Func, 14, "func(hi uint64, lo uint64, y uint64) uint64"},
+ {"Reverse", Func, 9, "func(x uint) uint"},
+ {"Reverse16", Func, 9, "func(x uint16) uint16"},
+ {"Reverse32", Func, 9, "func(x uint32) uint32"},
+ {"Reverse64", Func, 9, "func(x uint64) uint64"},
+ {"Reverse8", Func, 9, "func(x uint8) uint8"},
+ {"ReverseBytes", Func, 9, "func(x uint) uint"},
+ {"ReverseBytes16", Func, 9, "func(x uint16) uint16"},
+ {"ReverseBytes32", Func, 9, "func(x uint32) uint32"},
+ {"ReverseBytes64", Func, 9, "func(x uint64) uint64"},
+ {"RotateLeft", Func, 9, "func(x uint, k int) uint"},
+ {"RotateLeft16", Func, 9, "func(x uint16, k int) uint16"},
+ {"RotateLeft32", Func, 9, "func(x uint32, k int) uint32"},
+ {"RotateLeft64", Func, 9, "func(x uint64, k int) uint64"},
+ {"RotateLeft8", Func, 9, "func(x uint8, k int) uint8"},
+ {"Sub", Func, 12, "func(x uint, y uint, borrow uint) (diff uint, borrowOut uint)"},
+ {"Sub32", Func, 12, "func(x uint32, y uint32, borrow uint32) (diff uint32, borrowOut uint32)"},
+ {"Sub64", Func, 12, "func(x uint64, y uint64, borrow uint64) (diff uint64, borrowOut uint64)"},
+ {"TrailingZeros", Func, 9, "func(x uint) int"},
+ {"TrailingZeros16", Func, 9, "func(x uint16) int"},
+ {"TrailingZeros32", Func, 9, "func(x uint32) int"},
+ {"TrailingZeros64", Func, 9, "func(x uint64) int"},
+ {"TrailingZeros8", Func, 9, "func(x uint8) int"},
+ {"UintSize", Const, 9, ""},
+ },
+ "math/cmplx": {
+ {"Abs", Func, 0, "func(x complex128) float64"},
+ {"Acos", Func, 0, "func(x complex128) complex128"},
+ {"Acosh", Func, 0, "func(x complex128) complex128"},
+ {"Asin", Func, 0, "func(x complex128) complex128"},
+ {"Asinh", Func, 0, "func(x complex128) complex128"},
+ {"Atan", Func, 0, "func(x complex128) complex128"},
+ {"Atanh", Func, 0, "func(x complex128) complex128"},
+ {"Conj", Func, 0, "func(x complex128) complex128"},
+ {"Cos", Func, 0, "func(x complex128) complex128"},
+ {"Cosh", Func, 0, "func(x complex128) complex128"},
+ {"Cot", Func, 0, "func(x complex128) complex128"},
+ {"Exp", Func, 0, "func(x complex128) complex128"},
+ {"Inf", Func, 0, "func() complex128"},
+ {"IsInf", Func, 0, "func(x complex128) bool"},
+ {"IsNaN", Func, 0, "func(x complex128) bool"},
+ {"Log", Func, 0, "func(x complex128) complex128"},
+ {"Log10", Func, 0, "func(x complex128) complex128"},
+ {"NaN", Func, 0, "func() complex128"},
+ {"Phase", Func, 0, "func(x complex128) float64"},
+ {"Polar", Func, 0, "func(x complex128) (r float64, θ float64)"},
+ {"Pow", Func, 0, "func(x complex128, y complex128) complex128"},
+ {"Rect", Func, 0, "func(r float64, θ float64) complex128"},
+ {"Sin", Func, 0, "func(x complex128) complex128"},
+ {"Sinh", Func, 0, "func(x complex128) complex128"},
+ {"Sqrt", Func, 0, "func(x complex128) complex128"},
+ {"Tan", Func, 0, "func(x complex128) complex128"},
+ {"Tanh", Func, 0, "func(x complex128) complex128"},
+ },
+ "math/rand": {
+ {"(*Rand).ExpFloat64", Method, 0, ""},
+ {"(*Rand).Float32", Method, 0, ""},
+ {"(*Rand).Float64", Method, 0, ""},
+ {"(*Rand).Int", Method, 0, ""},
+ {"(*Rand).Int31", Method, 0, ""},
+ {"(*Rand).Int31n", Method, 0, ""},
+ {"(*Rand).Int63", Method, 0, ""},
+ {"(*Rand).Int63n", Method, 0, ""},
+ {"(*Rand).Intn", Method, 0, ""},
+ {"(*Rand).NormFloat64", Method, 0, ""},
+ {"(*Rand).Perm", Method, 0, ""},
+ {"(*Rand).Read", Method, 6, ""},
+ {"(*Rand).Seed", Method, 0, ""},
+ {"(*Rand).Shuffle", Method, 10, ""},
+ {"(*Rand).Uint32", Method, 0, ""},
+ {"(*Rand).Uint64", Method, 8, ""},
+ {"(*Zipf).Uint64", Method, 0, ""},
+ {"ExpFloat64", Func, 0, "func() float64"},
+ {"Float32", Func, 0, "func() float32"},
+ {"Float64", Func, 0, "func() float64"},
+ {"Int", Func, 0, "func() int"},
+ {"Int31", Func, 0, "func() int32"},
+ {"Int31n", Func, 0, "func(n int32) int32"},
+ {"Int63", Func, 0, "func() int64"},
+ {"Int63n", Func, 0, "func(n int64) int64"},
+ {"Intn", Func, 0, "func(n int) int"},
+ {"New", Func, 0, "func(src Source) *Rand"},
+ {"NewSource", Func, 0, "func(seed int64) Source"},
+ {"NewZipf", Func, 0, "func(r *Rand, s float64, v float64, imax uint64) *Zipf"},
+ {"NormFloat64", Func, 0, "func() float64"},
+ {"Perm", Func, 0, "func(n int) []int"},
+ {"Rand", Type, 0, ""},
+ {"Read", Func, 6, "func(p []byte) (n int, err error)"},
+ {"Seed", Func, 0, "func(seed int64)"},
+ {"Shuffle", Func, 10, "func(n int, swap func(i int, j int))"},
+ {"Source", Type, 0, ""},
+ {"Source64", Type, 8, ""},
+ {"Uint32", Func, 0, "func() uint32"},
+ {"Uint64", Func, 8, "func() uint64"},
+ {"Zipf", Type, 0, ""},
+ },
+ "math/rand/v2": {
+ {"(*ChaCha8).AppendBinary", Method, 24, ""},
+ {"(*ChaCha8).MarshalBinary", Method, 22, ""},
+ {"(*ChaCha8).Read", Method, 23, ""},
+ {"(*ChaCha8).Seed", Method, 22, ""},
+ {"(*ChaCha8).Uint64", Method, 22, ""},
+ {"(*ChaCha8).UnmarshalBinary", Method, 22, ""},
+ {"(*PCG).AppendBinary", Method, 24, ""},
+ {"(*PCG).MarshalBinary", Method, 22, ""},
+ {"(*PCG).Seed", Method, 22, ""},
+ {"(*PCG).Uint64", Method, 22, ""},
+ {"(*PCG).UnmarshalBinary", Method, 22, ""},
+ {"(*Rand).ExpFloat64", Method, 22, ""},
+ {"(*Rand).Float32", Method, 22, ""},
+ {"(*Rand).Float64", Method, 22, ""},
+ {"(*Rand).Int", Method, 22, ""},
+ {"(*Rand).Int32", Method, 22, ""},
+ {"(*Rand).Int32N", Method, 22, ""},
+ {"(*Rand).Int64", Method, 22, ""},
+ {"(*Rand).Int64N", Method, 22, ""},
+ {"(*Rand).IntN", Method, 22, ""},
+ {"(*Rand).NormFloat64", Method, 22, ""},
+ {"(*Rand).Perm", Method, 22, ""},
+ {"(*Rand).Shuffle", Method, 22, ""},
+ {"(*Rand).Uint", Method, 23, ""},
+ {"(*Rand).Uint32", Method, 22, ""},
+ {"(*Rand).Uint32N", Method, 22, ""},
+ {"(*Rand).Uint64", Method, 22, ""},
+ {"(*Rand).Uint64N", Method, 22, ""},
+ {"(*Rand).UintN", Method, 22, ""},
+ {"(*Zipf).Uint64", Method, 22, ""},
+ {"ChaCha8", Type, 22, ""},
+ {"ExpFloat64", Func, 22, "func() float64"},
+ {"Float32", Func, 22, "func() float32"},
+ {"Float64", Func, 22, "func() float64"},
+ {"Int", Func, 22, "func() int"},
+ {"Int32", Func, 22, "func() int32"},
+ {"Int32N", Func, 22, "func(n int32) int32"},
+ {"Int64", Func, 22, "func() int64"},
+ {"Int64N", Func, 22, "func(n int64) int64"},
+ {"IntN", Func, 22, "func(n int) int"},
+ {"N", Func, 22, "func[Int intType](n Int) Int"},
+ {"New", Func, 22, "func(src Source) *Rand"},
+ {"NewChaCha8", Func, 22, "func(seed [32]byte) *ChaCha8"},
+ {"NewPCG", Func, 22, "func(seed1 uint64, seed2 uint64) *PCG"},
+ {"NewZipf", Func, 22, "func(r *Rand, s float64, v float64, imax uint64) *Zipf"},
+ {"NormFloat64", Func, 22, "func() float64"},
+ {"PCG", Type, 22, ""},
+ {"Perm", Func, 22, "func(n int) []int"},
+ {"Rand", Type, 22, ""},
+ {"Shuffle", Func, 22, "func(n int, swap func(i int, j int))"},
+ {"Source", Type, 22, ""},
+ {"Uint", Func, 23, "func() uint"},
+ {"Uint32", Func, 22, "func() uint32"},
+ {"Uint32N", Func, 22, "func(n uint32) uint32"},
+ {"Uint64", Func, 22, "func() uint64"},
+ {"Uint64N", Func, 22, "func(n uint64) uint64"},
+ {"UintN", Func, 22, "func(n uint) uint"},
+ {"Zipf", Type, 22, ""},
+ },
+ "mime": {
+ {"(*WordDecoder).Decode", Method, 5, ""},
+ {"(*WordDecoder).DecodeHeader", Method, 5, ""},
+ {"(WordEncoder).Encode", Method, 5, ""},
+ {"AddExtensionType", Func, 0, "func(ext string, typ string) error"},
+ {"BEncoding", Const, 5, ""},
+ {"ErrInvalidMediaParameter", Var, 9, ""},
+ {"ExtensionsByType", Func, 5, "func(typ string) ([]string, error)"},
+ {"FormatMediaType", Func, 0, "func(t string, param map[string]string) string"},
+ {"ParseMediaType", Func, 0, "func(v string) (mediatype string, params map[string]string, err error)"},
+ {"QEncoding", Const, 5, ""},
+ {"TypeByExtension", Func, 0, "func(ext string) string"},
+ {"WordDecoder", Type, 5, ""},
+ {"WordDecoder.CharsetReader", Field, 5, ""},
+ {"WordEncoder", Type, 5, ""},
+ },
+ "mime/multipart": {
+ {"(*FileHeader).Open", Method, 0, ""},
+ {"(*Form).RemoveAll", Method, 0, ""},
+ {"(*Part).Close", Method, 0, ""},
+ {"(*Part).FileName", Method, 0, ""},
+ {"(*Part).FormName", Method, 0, ""},
+ {"(*Part).Read", Method, 0, ""},
+ {"(*Reader).NextPart", Method, 0, ""},
+ {"(*Reader).NextRawPart", Method, 14, ""},
+ {"(*Reader).ReadForm", Method, 0, ""},
+ {"(*Writer).Boundary", Method, 0, ""},
+ {"(*Writer).Close", Method, 0, ""},
+ {"(*Writer).CreateFormField", Method, 0, ""},
+ {"(*Writer).CreateFormFile", Method, 0, ""},
+ {"(*Writer).CreatePart", Method, 0, ""},
+ {"(*Writer).FormDataContentType", Method, 0, ""},
+ {"(*Writer).SetBoundary", Method, 1, ""},
+ {"(*Writer).WriteField", Method, 0, ""},
+ {"ErrMessageTooLarge", Var, 9, ""},
+ {"File", Type, 0, ""},
+ {"FileContentDisposition", Func, 25, "func(fieldname string, filename string) string"},
+ {"FileHeader", Type, 0, ""},
+ {"FileHeader.Filename", Field, 0, ""},
+ {"FileHeader.Header", Field, 0, ""},
+ {"FileHeader.Size", Field, 9, ""},
+ {"Form", Type, 0, ""},
+ {"Form.File", Field, 0, ""},
+ {"Form.Value", Field, 0, ""},
+ {"NewReader", Func, 0, "func(r io.Reader, boundary string) *Reader"},
+ {"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+ {"Part", Type, 0, ""},
+ {"Part.Header", Field, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "mime/quotedprintable": {
+ {"(*Reader).Read", Method, 5, ""},
+ {"(*Writer).Close", Method, 5, ""},
+ {"(*Writer).Write", Method, 5, ""},
+ {"NewReader", Func, 5, "func(r io.Reader) *Reader"},
+ {"NewWriter", Func, 5, "func(w io.Writer) *Writer"},
+ {"Reader", Type, 5, ""},
+ {"Writer", Type, 5, ""},
+ {"Writer.Binary", Field, 5, ""},
+ },
+ "net": {
+ {"(*AddrError).Error", Method, 0, ""},
+ {"(*AddrError).Temporary", Method, 0, ""},
+ {"(*AddrError).Timeout", Method, 0, ""},
+ {"(*Buffers).Read", Method, 8, ""},
+ {"(*Buffers).WriteTo", Method, 8, ""},
+ {"(*DNSConfigError).Error", Method, 0, ""},
+ {"(*DNSConfigError).Temporary", Method, 0, ""},
+ {"(*DNSConfigError).Timeout", Method, 0, ""},
+ {"(*DNSConfigError).Unwrap", Method, 13, ""},
+ {"(*DNSError).Error", Method, 0, ""},
+ {"(*DNSError).Temporary", Method, 0, ""},
+ {"(*DNSError).Timeout", Method, 0, ""},
+ {"(*DNSError).Unwrap", Method, 23, ""},
+ {"(*Dialer).Dial", Method, 1, ""},
+ {"(*Dialer).DialContext", Method, 7, ""},
+ {"(*Dialer).MultipathTCP", Method, 21, ""},
+ {"(*Dialer).SetMultipathTCP", Method, 21, ""},
+ {"(*IP).UnmarshalText", Method, 2, ""},
+ {"(*IPAddr).Network", Method, 0, ""},
+ {"(*IPAddr).String", Method, 0, ""},
+ {"(*IPConn).Close", Method, 0, ""},
+ {"(*IPConn).File", Method, 0, ""},
+ {"(*IPConn).LocalAddr", Method, 0, ""},
+ {"(*IPConn).Read", Method, 0, ""},
+ {"(*IPConn).ReadFrom", Method, 0, ""},
+ {"(*IPConn).ReadFromIP", Method, 0, ""},
+ {"(*IPConn).ReadMsgIP", Method, 1, ""},
+ {"(*IPConn).RemoteAddr", Method, 0, ""},
+ {"(*IPConn).SetDeadline", Method, 0, ""},
+ {"(*IPConn).SetReadBuffer", Method, 0, ""},
+ {"(*IPConn).SetReadDeadline", Method, 0, ""},
+ {"(*IPConn).SetWriteBuffer", Method, 0, ""},
+ {"(*IPConn).SetWriteDeadline", Method, 0, ""},
+ {"(*IPConn).SyscallConn", Method, 9, ""},
+ {"(*IPConn).Write", Method, 0, ""},
+ {"(*IPConn).WriteMsgIP", Method, 1, ""},
+ {"(*IPConn).WriteTo", Method, 0, ""},
+ {"(*IPConn).WriteToIP", Method, 0, ""},
+ {"(*IPNet).Contains", Method, 0, ""},
+ {"(*IPNet).Network", Method, 0, ""},
+ {"(*IPNet).String", Method, 0, ""},
+ {"(*Interface).Addrs", Method, 0, ""},
+ {"(*Interface).MulticastAddrs", Method, 0, ""},
+ {"(*ListenConfig).Listen", Method, 11, ""},
+ {"(*ListenConfig).ListenPacket", Method, 11, ""},
+ {"(*ListenConfig).MultipathTCP", Method, 21, ""},
+ {"(*ListenConfig).SetMultipathTCP", Method, 21, ""},
+ {"(*OpError).Error", Method, 0, ""},
+ {"(*OpError).Temporary", Method, 0, ""},
+ {"(*OpError).Timeout", Method, 0, ""},
+ {"(*OpError).Unwrap", Method, 13, ""},
+ {"(*ParseError).Error", Method, 0, ""},
+ {"(*ParseError).Temporary", Method, 17, ""},
+ {"(*ParseError).Timeout", Method, 17, ""},
+ {"(*Resolver).LookupAddr", Method, 8, ""},
+ {"(*Resolver).LookupCNAME", Method, 8, ""},
+ {"(*Resolver).LookupHost", Method, 8, ""},
+ {"(*Resolver).LookupIP", Method, 15, ""},
+ {"(*Resolver).LookupIPAddr", Method, 8, ""},
+ {"(*Resolver).LookupMX", Method, 8, ""},
+ {"(*Resolver).LookupNS", Method, 8, ""},
+ {"(*Resolver).LookupNetIP", Method, 18, ""},
+ {"(*Resolver).LookupPort", Method, 8, ""},
+ {"(*Resolver).LookupSRV", Method, 8, ""},
+ {"(*Resolver).LookupTXT", Method, 8, ""},
+ {"(*TCPAddr).AddrPort", Method, 18, ""},
+ {"(*TCPAddr).Network", Method, 0, ""},
+ {"(*TCPAddr).String", Method, 0, ""},
+ {"(*TCPConn).Close", Method, 0, ""},
+ {"(*TCPConn).CloseRead", Method, 0, ""},
+ {"(*TCPConn).CloseWrite", Method, 0, ""},
+ {"(*TCPConn).File", Method, 0, ""},
+ {"(*TCPConn).LocalAddr", Method, 0, ""},
+ {"(*TCPConn).MultipathTCP", Method, 21, ""},
+ {"(*TCPConn).Read", Method, 0, ""},
+ {"(*TCPConn).ReadFrom", Method, 0, ""},
+ {"(*TCPConn).RemoteAddr", Method, 0, ""},
+ {"(*TCPConn).SetDeadline", Method, 0, ""},
+ {"(*TCPConn).SetKeepAlive", Method, 0, ""},
+ {"(*TCPConn).SetKeepAliveConfig", Method, 23, ""},
+ {"(*TCPConn).SetKeepAlivePeriod", Method, 2, ""},
+ {"(*TCPConn).SetLinger", Method, 0, ""},
+ {"(*TCPConn).SetNoDelay", Method, 0, ""},
+ {"(*TCPConn).SetReadBuffer", Method, 0, ""},
+ {"(*TCPConn).SetReadDeadline", Method, 0, ""},
+ {"(*TCPConn).SetWriteBuffer", Method, 0, ""},
+ {"(*TCPConn).SetWriteDeadline", Method, 0, ""},
+ {"(*TCPConn).SyscallConn", Method, 9, ""},
+ {"(*TCPConn).Write", Method, 0, ""},
+ {"(*TCPConn).WriteTo", Method, 22, ""},
+ {"(*TCPListener).Accept", Method, 0, ""},
+ {"(*TCPListener).AcceptTCP", Method, 0, ""},
+ {"(*TCPListener).Addr", Method, 0, ""},
+ {"(*TCPListener).Close", Method, 0, ""},
+ {"(*TCPListener).File", Method, 0, ""},
+ {"(*TCPListener).SetDeadline", Method, 0, ""},
+ {"(*TCPListener).SyscallConn", Method, 10, ""},
+ {"(*UDPAddr).AddrPort", Method, 18, ""},
+ {"(*UDPAddr).Network", Method, 0, ""},
+ {"(*UDPAddr).String", Method, 0, ""},
+ {"(*UDPConn).Close", Method, 0, ""},
+ {"(*UDPConn).File", Method, 0, ""},
+ {"(*UDPConn).LocalAddr", Method, 0, ""},
+ {"(*UDPConn).Read", Method, 0, ""},
+ {"(*UDPConn).ReadFrom", Method, 0, ""},
+ {"(*UDPConn).ReadFromUDP", Method, 0, ""},
+ {"(*UDPConn).ReadFromUDPAddrPort", Method, 18, ""},
+ {"(*UDPConn).ReadMsgUDP", Method, 1, ""},
+ {"(*UDPConn).ReadMsgUDPAddrPort", Method, 18, ""},
+ {"(*UDPConn).RemoteAddr", Method, 0, ""},
+ {"(*UDPConn).SetDeadline", Method, 0, ""},
+ {"(*UDPConn).SetReadBuffer", Method, 0, ""},
+ {"(*UDPConn).SetReadDeadline", Method, 0, ""},
+ {"(*UDPConn).SetWriteBuffer", Method, 0, ""},
+ {"(*UDPConn).SetWriteDeadline", Method, 0, ""},
+ {"(*UDPConn).SyscallConn", Method, 9, ""},
+ {"(*UDPConn).Write", Method, 0, ""},
+ {"(*UDPConn).WriteMsgUDP", Method, 1, ""},
+ {"(*UDPConn).WriteMsgUDPAddrPort", Method, 18, ""},
+ {"(*UDPConn).WriteTo", Method, 0, ""},
+ {"(*UDPConn).WriteToUDP", Method, 0, ""},
+ {"(*UDPConn).WriteToUDPAddrPort", Method, 18, ""},
+ {"(*UnixAddr).Network", Method, 0, ""},
+ {"(*UnixAddr).String", Method, 0, ""},
+ {"(*UnixConn).Close", Method, 0, ""},
+ {"(*UnixConn).CloseRead", Method, 1, ""},
+ {"(*UnixConn).CloseWrite", Method, 1, ""},
+ {"(*UnixConn).File", Method, 0, ""},
+ {"(*UnixConn).LocalAddr", Method, 0, ""},
+ {"(*UnixConn).Read", Method, 0, ""},
+ {"(*UnixConn).ReadFrom", Method, 0, ""},
+ {"(*UnixConn).ReadFromUnix", Method, 0, ""},
+ {"(*UnixConn).ReadMsgUnix", Method, 0, ""},
+ {"(*UnixConn).RemoteAddr", Method, 0, ""},
+ {"(*UnixConn).SetDeadline", Method, 0, ""},
+ {"(*UnixConn).SetReadBuffer", Method, 0, ""},
+ {"(*UnixConn).SetReadDeadline", Method, 0, ""},
+ {"(*UnixConn).SetWriteBuffer", Method, 0, ""},
+ {"(*UnixConn).SetWriteDeadline", Method, 0, ""},
+ {"(*UnixConn).SyscallConn", Method, 9, ""},
+ {"(*UnixConn).Write", Method, 0, ""},
+ {"(*UnixConn).WriteMsgUnix", Method, 0, ""},
+ {"(*UnixConn).WriteTo", Method, 0, ""},
+ {"(*UnixConn).WriteToUnix", Method, 0, ""},
+ {"(*UnixListener).Accept", Method, 0, ""},
+ {"(*UnixListener).AcceptUnix", Method, 0, ""},
+ {"(*UnixListener).Addr", Method, 0, ""},
+ {"(*UnixListener).Close", Method, 0, ""},
+ {"(*UnixListener).File", Method, 0, ""},
+ {"(*UnixListener).SetDeadline", Method, 0, ""},
+ {"(*UnixListener).SetUnlinkOnClose", Method, 8, ""},
+ {"(*UnixListener).SyscallConn", Method, 10, ""},
+ {"(Flags).String", Method, 0, ""},
+ {"(HardwareAddr).String", Method, 0, ""},
+ {"(IP).AppendText", Method, 24, ""},
+ {"(IP).DefaultMask", Method, 0, ""},
+ {"(IP).Equal", Method, 0, ""},
+ {"(IP).IsGlobalUnicast", Method, 0, ""},
+ {"(IP).IsInterfaceLocalMulticast", Method, 0, ""},
+ {"(IP).IsLinkLocalMulticast", Method, 0, ""},
+ {"(IP).IsLinkLocalUnicast", Method, 0, ""},
+ {"(IP).IsLoopback", Method, 0, ""},
+ {"(IP).IsMulticast", Method, 0, ""},
+ {"(IP).IsPrivate", Method, 17, ""},
+ {"(IP).IsUnspecified", Method, 0, ""},
+ {"(IP).MarshalText", Method, 2, ""},
+ {"(IP).Mask", Method, 0, ""},
+ {"(IP).String", Method, 0, ""},
+ {"(IP).To16", Method, 0, ""},
+ {"(IP).To4", Method, 0, ""},
+ {"(IPMask).Size", Method, 0, ""},
+ {"(IPMask).String", Method, 0, ""},
+ {"(InvalidAddrError).Error", Method, 0, ""},
+ {"(InvalidAddrError).Temporary", Method, 0, ""},
+ {"(InvalidAddrError).Timeout", Method, 0, ""},
+ {"(UnknownNetworkError).Error", Method, 0, ""},
+ {"(UnknownNetworkError).Temporary", Method, 0, ""},
+ {"(UnknownNetworkError).Timeout", Method, 0, ""},
+ {"Addr", Type, 0, ""},
+ {"AddrError", Type, 0, ""},
+ {"AddrError.Addr", Field, 0, ""},
+ {"AddrError.Err", Field, 0, ""},
+ {"Buffers", Type, 8, ""},
+ {"CIDRMask", Func, 0, "func(ones int, bits int) IPMask"},
+ {"Conn", Type, 0, ""},
+ {"DNSConfigError", Type, 0, ""},
+ {"DNSConfigError.Err", Field, 0, ""},
+ {"DNSError", Type, 0, ""},
+ {"DNSError.Err", Field, 0, ""},
+ {"DNSError.IsNotFound", Field, 13, ""},
+ {"DNSError.IsTemporary", Field, 6, ""},
+ {"DNSError.IsTimeout", Field, 0, ""},
+ {"DNSError.Name", Field, 0, ""},
+ {"DNSError.Server", Field, 0, ""},
+ {"DNSError.UnwrapErr", Field, 23, ""},
+ {"DefaultResolver", Var, 8, ""},
+ {"Dial", Func, 0, "func(network string, address string) (Conn, error)"},
+ {"DialIP", Func, 0, "func(network string, laddr *IPAddr, raddr *IPAddr) (*IPConn, error)"},
+ {"DialTCP", Func, 0, "func(network string, laddr *TCPAddr, raddr *TCPAddr) (*TCPConn, error)"},
+ {"DialTimeout", Func, 0, "func(network string, address string, timeout time.Duration) (Conn, error)"},
+ {"DialUDP", Func, 0, "func(network string, laddr *UDPAddr, raddr *UDPAddr) (*UDPConn, error)"},
+ {"DialUnix", Func, 0, "func(network string, laddr *UnixAddr, raddr *UnixAddr) (*UnixConn, error)"},
+ {"Dialer", Type, 1, ""},
+ {"Dialer.Cancel", Field, 6, ""},
+ {"Dialer.Control", Field, 11, ""},
+ {"Dialer.ControlContext", Field, 20, ""},
+ {"Dialer.Deadline", Field, 1, ""},
+ {"Dialer.DualStack", Field, 2, ""},
+ {"Dialer.FallbackDelay", Field, 5, ""},
+ {"Dialer.KeepAlive", Field, 3, ""},
+ {"Dialer.KeepAliveConfig", Field, 23, ""},
+ {"Dialer.LocalAddr", Field, 1, ""},
+ {"Dialer.Resolver", Field, 8, ""},
+ {"Dialer.Timeout", Field, 1, ""},
+ {"ErrClosed", Var, 16, ""},
+ {"ErrWriteToConnected", Var, 0, ""},
+ {"Error", Type, 0, ""},
+ {"FileConn", Func, 0, "func(f *os.File) (c Conn, err error)"},
+ {"FileListener", Func, 0, "func(f *os.File) (ln Listener, err error)"},
+ {"FilePacketConn", Func, 0, "func(f *os.File) (c PacketConn, err error)"},
+ {"FlagBroadcast", Const, 0, ""},
+ {"FlagLoopback", Const, 0, ""},
+ {"FlagMulticast", Const, 0, ""},
+ {"FlagPointToPoint", Const, 0, ""},
+ {"FlagRunning", Const, 20, ""},
+ {"FlagUp", Const, 0, ""},
+ {"Flags", Type, 0, ""},
+ {"HardwareAddr", Type, 0, ""},
+ {"IP", Type, 0, ""},
+ {"IPAddr", Type, 0, ""},
+ {"IPAddr.IP", Field, 0, ""},
+ {"IPAddr.Zone", Field, 1, ""},
+ {"IPConn", Type, 0, ""},
+ {"IPMask", Type, 0, ""},
+ {"IPNet", Type, 0, ""},
+ {"IPNet.IP", Field, 0, ""},
+ {"IPNet.Mask", Field, 0, ""},
+ {"IPv4", Func, 0, "func(a byte, b byte, c byte, d byte) IP"},
+ {"IPv4Mask", Func, 0, "func(a byte, b byte, c byte, d byte) IPMask"},
+ {"IPv4allrouter", Var, 0, ""},
+ {"IPv4allsys", Var, 0, ""},
+ {"IPv4bcast", Var, 0, ""},
+ {"IPv4len", Const, 0, ""},
+ {"IPv4zero", Var, 0, ""},
+ {"IPv6interfacelocalallnodes", Var, 0, ""},
+ {"IPv6len", Const, 0, ""},
+ {"IPv6linklocalallnodes", Var, 0, ""},
+ {"IPv6linklocalallrouters", Var, 0, ""},
+ {"IPv6loopback", Var, 0, ""},
+ {"IPv6unspecified", Var, 0, ""},
+ {"IPv6zero", Var, 0, ""},
+ {"Interface", Type, 0, ""},
+ {"Interface.Flags", Field, 0, ""},
+ {"Interface.HardwareAddr", Field, 0, ""},
+ {"Interface.Index", Field, 0, ""},
+ {"Interface.MTU", Field, 0, ""},
+ {"Interface.Name", Field, 0, ""},
+ {"InterfaceAddrs", Func, 0, "func() ([]Addr, error)"},
+ {"InterfaceByIndex", Func, 0, "func(index int) (*Interface, error)"},
+ {"InterfaceByName", Func, 0, "func(name string) (*Interface, error)"},
+ {"Interfaces", Func, 0, "func() ([]Interface, error)"},
+ {"InvalidAddrError", Type, 0, ""},
+ {"JoinHostPort", Func, 0, "func(host string, port string) string"},
+ {"KeepAliveConfig", Type, 23, ""},
+ {"KeepAliveConfig.Count", Field, 23, ""},
+ {"KeepAliveConfig.Enable", Field, 23, ""},
+ {"KeepAliveConfig.Idle", Field, 23, ""},
+ {"KeepAliveConfig.Interval", Field, 23, ""},
+ {"Listen", Func, 0, "func(network string, address string) (Listener, error)"},
+ {"ListenConfig", Type, 11, ""},
+ {"ListenConfig.Control", Field, 11, ""},
+ {"ListenConfig.KeepAlive", Field, 13, ""},
+ {"ListenConfig.KeepAliveConfig", Field, 23, ""},
+ {"ListenIP", Func, 0, "func(network string, laddr *IPAddr) (*IPConn, error)"},
+ {"ListenMulticastUDP", Func, 0, "func(network string, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error)"},
+ {"ListenPacket", Func, 0, "func(network string, address string) (PacketConn, error)"},
+ {"ListenTCP", Func, 0, "func(network string, laddr *TCPAddr) (*TCPListener, error)"},
+ {"ListenUDP", Func, 0, "func(network string, laddr *UDPAddr) (*UDPConn, error)"},
+ {"ListenUnix", Func, 0, "func(network string, laddr *UnixAddr) (*UnixListener, error)"},
+ {"ListenUnixgram", Func, 0, "func(network string, laddr *UnixAddr) (*UnixConn, error)"},
+ {"Listener", Type, 0, ""},
+ {"LookupAddr", Func, 0, "func(addr string) (names []string, err error)"},
+ {"LookupCNAME", Func, 0, "func(host string) (cname string, err error)"},
+ {"LookupHost", Func, 0, "func(host string) (addrs []string, err error)"},
+ {"LookupIP", Func, 0, "func(host string) ([]IP, error)"},
+ {"LookupMX", Func, 0, "func(name string) ([]*MX, error)"},
+ {"LookupNS", Func, 1, "func(name string) ([]*NS, error)"},
+ {"LookupPort", Func, 0, "func(network string, service string) (port int, err error)"},
+ {"LookupSRV", Func, 0, "func(service string, proto string, name string) (cname string, addrs []*SRV, err error)"},
+ {"LookupTXT", Func, 0, "func(name string) ([]string, error)"},
+ {"MX", Type, 0, ""},
+ {"MX.Host", Field, 0, ""},
+ {"MX.Pref", Field, 0, ""},
+ {"NS", Type, 1, ""},
+ {"NS.Host", Field, 1, ""},
+ {"OpError", Type, 0, ""},
+ {"OpError.Addr", Field, 0, ""},
+ {"OpError.Err", Field, 0, ""},
+ {"OpError.Net", Field, 0, ""},
+ {"OpError.Op", Field, 0, ""},
+ {"OpError.Source", Field, 5, ""},
+ {"PacketConn", Type, 0, ""},
+ {"ParseCIDR", Func, 0, "func(s string) (IP, *IPNet, error)"},
+ {"ParseError", Type, 0, ""},
+ {"ParseError.Text", Field, 0, ""},
+ {"ParseError.Type", Field, 0, ""},
+ {"ParseIP", Func, 0, "func(s string) IP"},
+ {"ParseMAC", Func, 0, "func(s string) (hw HardwareAddr, err error)"},
+ {"Pipe", Func, 0, "func() (Conn, Conn)"},
+ {"ResolveIPAddr", Func, 0, "func(network string, address string) (*IPAddr, error)"},
+ {"ResolveTCPAddr", Func, 0, "func(network string, address string) (*TCPAddr, error)"},
+ {"ResolveUDPAddr", Func, 0, "func(network string, address string) (*UDPAddr, error)"},
+ {"ResolveUnixAddr", Func, 0, "func(network string, address string) (*UnixAddr, error)"},
+ {"Resolver", Type, 8, ""},
+ {"Resolver.Dial", Field, 9, ""},
+ {"Resolver.PreferGo", Field, 8, ""},
+ {"Resolver.StrictErrors", Field, 9, ""},
+ {"SRV", Type, 0, ""},
+ {"SRV.Port", Field, 0, ""},
+ {"SRV.Priority", Field, 0, ""},
+ {"SRV.Target", Field, 0, ""},
+ {"SRV.Weight", Field, 0, ""},
+ {"SplitHostPort", Func, 0, "func(hostport string) (host string, port string, err error)"},
+ {"TCPAddr", Type, 0, ""},
+ {"TCPAddr.IP", Field, 0, ""},
+ {"TCPAddr.Port", Field, 0, ""},
+ {"TCPAddr.Zone", Field, 1, ""},
+ {"TCPAddrFromAddrPort", Func, 18, "func(addr netip.AddrPort) *TCPAddr"},
+ {"TCPConn", Type, 0, ""},
+ {"TCPListener", Type, 0, ""},
+ {"UDPAddr", Type, 0, ""},
+ {"UDPAddr.IP", Field, 0, ""},
+ {"UDPAddr.Port", Field, 0, ""},
+ {"UDPAddr.Zone", Field, 1, ""},
+ {"UDPAddrFromAddrPort", Func, 18, "func(addr netip.AddrPort) *UDPAddr"},
+ {"UDPConn", Type, 0, ""},
+ {"UnixAddr", Type, 0, ""},
+ {"UnixAddr.Name", Field, 0, ""},
+ {"UnixAddr.Net", Field, 0, ""},
+ {"UnixConn", Type, 0, ""},
+ {"UnixListener", Type, 0, ""},
+ {"UnknownNetworkError", Type, 0, ""},
+ },
+ "net/http": {
+ {"(*Client).CloseIdleConnections", Method, 12, ""},
+ {"(*Client).Do", Method, 0, ""},
+ {"(*Client).Get", Method, 0, ""},
+ {"(*Client).Head", Method, 0, ""},
+ {"(*Client).Post", Method, 0, ""},
+ {"(*Client).PostForm", Method, 0, ""},
+ {"(*Cookie).String", Method, 0, ""},
+ {"(*Cookie).Valid", Method, 18, ""},
+ {"(*CrossOriginProtection).AddInsecureBypassPattern", Method, 25, ""},
+ {"(*CrossOriginProtection).AddTrustedOrigin", Method, 25, ""},
+ {"(*CrossOriginProtection).Check", Method, 25, ""},
+ {"(*CrossOriginProtection).Handler", Method, 25, ""},
+ {"(*CrossOriginProtection).SetDenyHandler", Method, 25, ""},
+ {"(*MaxBytesError).Error", Method, 19, ""},
+ {"(*ProtocolError).Error", Method, 0, ""},
+ {"(*ProtocolError).Is", Method, 21, ""},
+ {"(*Protocols).SetHTTP1", Method, 24, ""},
+ {"(*Protocols).SetHTTP2", Method, 24, ""},
+ {"(*Protocols).SetUnencryptedHTTP2", Method, 24, ""},
+ {"(*Request).AddCookie", Method, 0, ""},
+ {"(*Request).BasicAuth", Method, 4, ""},
+ {"(*Request).Clone", Method, 13, ""},
+ {"(*Request).Context", Method, 7, ""},
+ {"(*Request).Cookie", Method, 0, ""},
+ {"(*Request).Cookies", Method, 0, ""},
+ {"(*Request).CookiesNamed", Method, 23, ""},
+ {"(*Request).FormFile", Method, 0, ""},
+ {"(*Request).FormValue", Method, 0, ""},
+ {"(*Request).MultipartReader", Method, 0, ""},
+ {"(*Request).ParseForm", Method, 0, ""},
+ {"(*Request).ParseMultipartForm", Method, 0, ""},
+ {"(*Request).PathValue", Method, 22, ""},
+ {"(*Request).PostFormValue", Method, 1, ""},
+ {"(*Request).ProtoAtLeast", Method, 0, ""},
+ {"(*Request).Referer", Method, 0, ""},
+ {"(*Request).SetBasicAuth", Method, 0, ""},
+ {"(*Request).SetPathValue", Method, 22, ""},
+ {"(*Request).UserAgent", Method, 0, ""},
+ {"(*Request).WithContext", Method, 7, ""},
+ {"(*Request).Write", Method, 0, ""},
+ {"(*Request).WriteProxy", Method, 0, ""},
+ {"(*Response).Cookies", Method, 0, ""},
+ {"(*Response).Location", Method, 0, ""},
+ {"(*Response).ProtoAtLeast", Method, 0, ""},
+ {"(*Response).Write", Method, 0, ""},
+ {"(*ResponseController).EnableFullDuplex", Method, 21, ""},
+ {"(*ResponseController).Flush", Method, 20, ""},
+ {"(*ResponseController).Hijack", Method, 20, ""},
+ {"(*ResponseController).SetReadDeadline", Method, 20, ""},
+ {"(*ResponseController).SetWriteDeadline", Method, 20, ""},
+ {"(*ServeMux).Handle", Method, 0, ""},
+ {"(*ServeMux).HandleFunc", Method, 0, ""},
+ {"(*ServeMux).Handler", Method, 1, ""},
+ {"(*ServeMux).ServeHTTP", Method, 0, ""},
+ {"(*Server).Close", Method, 8, ""},
+ {"(*Server).ListenAndServe", Method, 0, ""},
+ {"(*Server).ListenAndServeTLS", Method, 0, ""},
+ {"(*Server).RegisterOnShutdown", Method, 9, ""},
+ {"(*Server).Serve", Method, 0, ""},
+ {"(*Server).ServeTLS", Method, 9, ""},
+ {"(*Server).SetKeepAlivesEnabled", Method, 3, ""},
+ {"(*Server).Shutdown", Method, 8, ""},
+ {"(*Transport).CancelRequest", Method, 1, ""},
+ {"(*Transport).Clone", Method, 13, ""},
+ {"(*Transport).CloseIdleConnections", Method, 0, ""},
+ {"(*Transport).RegisterProtocol", Method, 0, ""},
+ {"(*Transport).RoundTrip", Method, 0, ""},
+ {"(ConnState).String", Method, 3, ""},
+ {"(Dir).Open", Method, 0, ""},
+ {"(HandlerFunc).ServeHTTP", Method, 0, ""},
+ {"(Header).Add", Method, 0, ""},
+ {"(Header).Clone", Method, 13, ""},
+ {"(Header).Del", Method, 0, ""},
+ {"(Header).Get", Method, 0, ""},
+ {"(Header).Set", Method, 0, ""},
+ {"(Header).Values", Method, 14, ""},
+ {"(Header).Write", Method, 0, ""},
+ {"(Header).WriteSubset", Method, 0, ""},
+ {"(Protocols).HTTP1", Method, 24, ""},
+ {"(Protocols).HTTP2", Method, 24, ""},
+ {"(Protocols).String", Method, 24, ""},
+ {"(Protocols).UnencryptedHTTP2", Method, 24, ""},
+ {"AllowQuerySemicolons", Func, 17, "func(h Handler) Handler"},
+ {"CanonicalHeaderKey", Func, 0, "func(s string) string"},
+ {"Client", Type, 0, ""},
+ {"Client.CheckRedirect", Field, 0, ""},
+ {"Client.Jar", Field, 0, ""},
+ {"Client.Timeout", Field, 3, ""},
+ {"Client.Transport", Field, 0, ""},
+ {"CloseNotifier", Type, 1, ""},
+ {"ConnState", Type, 3, ""},
+ {"Cookie", Type, 0, ""},
+ {"Cookie.Domain", Field, 0, ""},
+ {"Cookie.Expires", Field, 0, ""},
+ {"Cookie.HttpOnly", Field, 0, ""},
+ {"Cookie.MaxAge", Field, 0, ""},
+ {"Cookie.Name", Field, 0, ""},
+ {"Cookie.Partitioned", Field, 23, ""},
+ {"Cookie.Path", Field, 0, ""},
+ {"Cookie.Quoted", Field, 23, ""},
+ {"Cookie.Raw", Field, 0, ""},
+ {"Cookie.RawExpires", Field, 0, ""},
+ {"Cookie.SameSite", Field, 11, ""},
+ {"Cookie.Secure", Field, 0, ""},
+ {"Cookie.Unparsed", Field, 0, ""},
+ {"Cookie.Value", Field, 0, ""},
+ {"CookieJar", Type, 0, ""},
+ {"CrossOriginProtection", Type, 25, ""},
+ {"DefaultClient", Var, 0, ""},
+ {"DefaultMaxHeaderBytes", Const, 0, ""},
+ {"DefaultMaxIdleConnsPerHost", Const, 0, ""},
+ {"DefaultServeMux", Var, 0, ""},
+ {"DefaultTransport", Var, 0, ""},
+ {"DetectContentType", Func, 0, "func(data []byte) string"},
+ {"Dir", Type, 0, ""},
+ {"ErrAbortHandler", Var, 8, ""},
+ {"ErrBodyNotAllowed", Var, 0, ""},
+ {"ErrBodyReadAfterClose", Var, 0, ""},
+ {"ErrContentLength", Var, 0, ""},
+ {"ErrHandlerTimeout", Var, 0, ""},
+ {"ErrHeaderTooLong", Var, 0, ""},
+ {"ErrHijacked", Var, 0, ""},
+ {"ErrLineTooLong", Var, 0, ""},
+ {"ErrMissingBoundary", Var, 0, ""},
+ {"ErrMissingContentLength", Var, 0, ""},
+ {"ErrMissingFile", Var, 0, ""},
+ {"ErrNoCookie", Var, 0, ""},
+ {"ErrNoLocation", Var, 0, ""},
+ {"ErrNotMultipart", Var, 0, ""},
+ {"ErrNotSupported", Var, 0, ""},
+ {"ErrSchemeMismatch", Var, 21, ""},
+ {"ErrServerClosed", Var, 8, ""},
+ {"ErrShortBody", Var, 0, ""},
+ {"ErrSkipAltProtocol", Var, 6, ""},
+ {"ErrUnexpectedTrailer", Var, 0, ""},
+ {"ErrUseLastResponse", Var, 7, ""},
+ {"ErrWriteAfterFlush", Var, 0, ""},
+ {"Error", Func, 0, "func(w ResponseWriter, error string, code int)"},
+ {"FS", Func, 16, "func(fsys fs.FS) FileSystem"},
+ {"File", Type, 0, ""},
+ {"FileServer", Func, 0, "func(root FileSystem) Handler"},
+ {"FileServerFS", Func, 22, "func(root fs.FS) Handler"},
+ {"FileSystem", Type, 0, ""},
+ {"Flusher", Type, 0, ""},
+ {"Get", Func, 0, "func(url string) (resp *Response, err error)"},
+ {"HTTP2Config", Type, 24, ""},
+ {"HTTP2Config.CountError", Field, 24, ""},
+ {"HTTP2Config.MaxConcurrentStreams", Field, 24, ""},
+ {"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24, ""},
+ {"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24, ""},
+ {"HTTP2Config.MaxReadFrameSize", Field, 24, ""},
+ {"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24, ""},
+ {"HTTP2Config.MaxReceiveBufferPerStream", Field, 24, ""},
+ {"HTTP2Config.PermitProhibitedCipherSuites", Field, 24, ""},
+ {"HTTP2Config.PingTimeout", Field, 24, ""},
+ {"HTTP2Config.SendPingTimeout", Field, 24, ""},
+ {"HTTP2Config.WriteByteTimeout", Field, 24, ""},
+ {"Handle", Func, 0, "func(pattern string, handler Handler)"},
+ {"HandleFunc", Func, 0, "func(pattern string, handler func(ResponseWriter, *Request))"},
+ {"Handler", Type, 0, ""},
+ {"HandlerFunc", Type, 0, ""},
+ {"Head", Func, 0, "func(url string) (resp *Response, err error)"},
+ {"Header", Type, 0, ""},
+ {"Hijacker", Type, 0, ""},
+ {"ListenAndServe", Func, 0, "func(addr string, handler Handler) error"},
+ {"ListenAndServeTLS", Func, 0, "func(addr string, certFile string, keyFile string, handler Handler) error"},
+ {"LocalAddrContextKey", Var, 7, ""},
+ {"MaxBytesError", Type, 19, ""},
+ {"MaxBytesError.Limit", Field, 19, ""},
+ {"MaxBytesHandler", Func, 18, "func(h Handler, n int64) Handler"},
+ {"MaxBytesReader", Func, 0, "func(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser"},
+ {"MethodConnect", Const, 6, ""},
+ {"MethodDelete", Const, 6, ""},
+ {"MethodGet", Const, 6, ""},
+ {"MethodHead", Const, 6, ""},
+ {"MethodOptions", Const, 6, ""},
+ {"MethodPatch", Const, 6, ""},
+ {"MethodPost", Const, 6, ""},
+ {"MethodPut", Const, 6, ""},
+ {"MethodTrace", Const, 6, ""},
+ {"NewCrossOriginProtection", Func, 25, "func() *CrossOriginProtection"},
+ {"NewFileTransport", Func, 0, "func(fs FileSystem) RoundTripper"},
+ {"NewFileTransportFS", Func, 22, "func(fsys fs.FS) RoundTripper"},
+ {"NewRequest", Func, 0, "func(method string, url string, body io.Reader) (*Request, error)"},
+ {"NewRequestWithContext", Func, 13, "func(ctx context.Context, method string, url string, body io.Reader) (*Request, error)"},
+ {"NewResponseController", Func, 20, "func(rw ResponseWriter) *ResponseController"},
+ {"NewServeMux", Func, 0, "func() *ServeMux"},
+ {"NoBody", Var, 8, ""},
+ {"NotFound", Func, 0, "func(w ResponseWriter, r *Request)"},
+ {"NotFoundHandler", Func, 0, "func() Handler"},
+ {"ParseCookie", Func, 23, "func(line string) ([]*Cookie, error)"},
+ {"ParseHTTPVersion", Func, 0, "func(vers string) (major int, minor int, ok bool)"},
+ {"ParseSetCookie", Func, 23, "func(line string) (*Cookie, error)"},
+ {"ParseTime", Func, 1, "func(text string) (t time.Time, err error)"},
+ {"Post", Func, 0, "func(url string, contentType string, body io.Reader) (resp *Response, err error)"},
+ {"PostForm", Func, 0, "func(url string, data url.Values) (resp *Response, err error)"},
+ {"ProtocolError", Type, 0, ""},
+ {"ProtocolError.ErrorString", Field, 0, ""},
+ {"Protocols", Type, 24, ""},
+ {"ProxyFromEnvironment", Func, 0, "func(req *Request) (*url.URL, error)"},
+ {"ProxyURL", Func, 0, "func(fixedURL *url.URL) func(*Request) (*url.URL, error)"},
+ {"PushOptions", Type, 8, ""},
+ {"PushOptions.Header", Field, 8, ""},
+ {"PushOptions.Method", Field, 8, ""},
+ {"Pusher", Type, 8, ""},
+ {"ReadRequest", Func, 0, "func(b *bufio.Reader) (*Request, error)"},
+ {"ReadResponse", Func, 0, "func(r *bufio.Reader, req *Request) (*Response, error)"},
+ {"Redirect", Func, 0, "func(w ResponseWriter, r *Request, url string, code int)"},
+ {"RedirectHandler", Func, 0, "func(url string, code int) Handler"},
+ {"Request", Type, 0, ""},
+ {"Request.Body", Field, 0, ""},
+ {"Request.Cancel", Field, 5, ""},
+ {"Request.Close", Field, 0, ""},
+ {"Request.ContentLength", Field, 0, ""},
+ {"Request.Form", Field, 0, ""},
+ {"Request.GetBody", Field, 8, ""},
+ {"Request.Header", Field, 0, ""},
+ {"Request.Host", Field, 0, ""},
+ {"Request.Method", Field, 0, ""},
+ {"Request.MultipartForm", Field, 0, ""},
+ {"Request.Pattern", Field, 23, ""},
+ {"Request.PostForm", Field, 1, ""},
+ {"Request.Proto", Field, 0, ""},
+ {"Request.ProtoMajor", Field, 0, ""},
+ {"Request.ProtoMinor", Field, 0, ""},
+ {"Request.RemoteAddr", Field, 0, ""},
+ {"Request.RequestURI", Field, 0, ""},
+ {"Request.Response", Field, 7, ""},
+ {"Request.TLS", Field, 0, ""},
+ {"Request.Trailer", Field, 0, ""},
+ {"Request.TransferEncoding", Field, 0, ""},
+ {"Request.URL", Field, 0, ""},
+ {"Response", Type, 0, ""},
+ {"Response.Body", Field, 0, ""},
+ {"Response.Close", Field, 0, ""},
+ {"Response.ContentLength", Field, 0, ""},
+ {"Response.Header", Field, 0, ""},
+ {"Response.Proto", Field, 0, ""},
+ {"Response.ProtoMajor", Field, 0, ""},
+ {"Response.ProtoMinor", Field, 0, ""},
+ {"Response.Request", Field, 0, ""},
+ {"Response.Status", Field, 0, ""},
+ {"Response.StatusCode", Field, 0, ""},
+ {"Response.TLS", Field, 3, ""},
+ {"Response.Trailer", Field, 0, ""},
+ {"Response.TransferEncoding", Field, 0, ""},
+ {"Response.Uncompressed", Field, 7, ""},
+ {"ResponseController", Type, 20, ""},
+ {"ResponseWriter", Type, 0, ""},
+ {"RoundTripper", Type, 0, ""},
+ {"SameSite", Type, 11, ""},
+ {"SameSiteDefaultMode", Const, 11, ""},
+ {"SameSiteLaxMode", Const, 11, ""},
+ {"SameSiteNoneMode", Const, 13, ""},
+ {"SameSiteStrictMode", Const, 11, ""},
+ {"Serve", Func, 0, "func(l net.Listener, handler Handler) error"},
+ {"ServeContent", Func, 0, "func(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker)"},
+ {"ServeFile", Func, 0, "func(w ResponseWriter, r *Request, name string)"},
+ {"ServeFileFS", Func, 22, "func(w ResponseWriter, r *Request, fsys fs.FS, name string)"},
+ {"ServeMux", Type, 0, ""},
+ {"ServeTLS", Func, 9, "func(l net.Listener, handler Handler, certFile string, keyFile string) error"},
+ {"Server", Type, 0, ""},
+ {"Server.Addr", Field, 0, ""},
+ {"Server.BaseContext", Field, 13, ""},
+ {"Server.ConnContext", Field, 13, ""},
+ {"Server.ConnState", Field, 3, ""},
+ {"Server.DisableGeneralOptionsHandler", Field, 20, ""},
+ {"Server.ErrorLog", Field, 3, ""},
+ {"Server.HTTP2", Field, 24, ""},
+ {"Server.Handler", Field, 0, ""},
+ {"Server.IdleTimeout", Field, 8, ""},
+ {"Server.MaxHeaderBytes", Field, 0, ""},
+ {"Server.Protocols", Field, 24, ""},
+ {"Server.ReadHeaderTimeout", Field, 8, ""},
+ {"Server.ReadTimeout", Field, 0, ""},
+ {"Server.TLSConfig", Field, 0, ""},
+ {"Server.TLSNextProto", Field, 1, ""},
+ {"Server.WriteTimeout", Field, 0, ""},
+ {"ServerContextKey", Var, 7, ""},
+ {"SetCookie", Func, 0, "func(w ResponseWriter, cookie *Cookie)"},
+ {"StateActive", Const, 3, ""},
+ {"StateClosed", Const, 3, ""},
+ {"StateHijacked", Const, 3, ""},
+ {"StateIdle", Const, 3, ""},
+ {"StateNew", Const, 3, ""},
+ {"StatusAccepted", Const, 0, ""},
+ {"StatusAlreadyReported", Const, 7, ""},
+ {"StatusBadGateway", Const, 0, ""},
+ {"StatusBadRequest", Const, 0, ""},
+ {"StatusConflict", Const, 0, ""},
+ {"StatusContinue", Const, 0, ""},
+ {"StatusCreated", Const, 0, ""},
+ {"StatusEarlyHints", Const, 13, ""},
+ {"StatusExpectationFailed", Const, 0, ""},
+ {"StatusFailedDependency", Const, 7, ""},
+ {"StatusForbidden", Const, 0, ""},
+ {"StatusFound", Const, 0, ""},
+ {"StatusGatewayTimeout", Const, 0, ""},
+ {"StatusGone", Const, 0, ""},
+ {"StatusHTTPVersionNotSupported", Const, 0, ""},
+ {"StatusIMUsed", Const, 7, ""},
+ {"StatusInsufficientStorage", Const, 7, ""},
+ {"StatusInternalServerError", Const, 0, ""},
+ {"StatusLengthRequired", Const, 0, ""},
+ {"StatusLocked", Const, 7, ""},
+ {"StatusLoopDetected", Const, 7, ""},
+ {"StatusMethodNotAllowed", Const, 0, ""},
+ {"StatusMisdirectedRequest", Const, 11, ""},
+ {"StatusMovedPermanently", Const, 0, ""},
+ {"StatusMultiStatus", Const, 7, ""},
+ {"StatusMultipleChoices", Const, 0, ""},
+ {"StatusNetworkAuthenticationRequired", Const, 6, ""},
+ {"StatusNoContent", Const, 0, ""},
+ {"StatusNonAuthoritativeInfo", Const, 0, ""},
+ {"StatusNotAcceptable", Const, 0, ""},
+ {"StatusNotExtended", Const, 7, ""},
+ {"StatusNotFound", Const, 0, ""},
+ {"StatusNotImplemented", Const, 0, ""},
+ {"StatusNotModified", Const, 0, ""},
+ {"StatusOK", Const, 0, ""},
+ {"StatusPartialContent", Const, 0, ""},
+ {"StatusPaymentRequired", Const, 0, ""},
+ {"StatusPermanentRedirect", Const, 7, ""},
+ {"StatusPreconditionFailed", Const, 0, ""},
+ {"StatusPreconditionRequired", Const, 6, ""},
+ {"StatusProcessing", Const, 7, ""},
+ {"StatusProxyAuthRequired", Const, 0, ""},
+ {"StatusRequestEntityTooLarge", Const, 0, ""},
+ {"StatusRequestHeaderFieldsTooLarge", Const, 6, ""},
+ {"StatusRequestTimeout", Const, 0, ""},
+ {"StatusRequestURITooLong", Const, 0, ""},
+ {"StatusRequestedRangeNotSatisfiable", Const, 0, ""},
+ {"StatusResetContent", Const, 0, ""},
+ {"StatusSeeOther", Const, 0, ""},
+ {"StatusServiceUnavailable", Const, 0, ""},
+ {"StatusSwitchingProtocols", Const, 0, ""},
+ {"StatusTeapot", Const, 0, ""},
+ {"StatusTemporaryRedirect", Const, 0, ""},
+ {"StatusText", Func, 0, "func(code int) string"},
+ {"StatusTooEarly", Const, 12, ""},
+ {"StatusTooManyRequests", Const, 6, ""},
+ {"StatusUnauthorized", Const, 0, ""},
+ {"StatusUnavailableForLegalReasons", Const, 6, ""},
+ {"StatusUnprocessableEntity", Const, 7, ""},
+ {"StatusUnsupportedMediaType", Const, 0, ""},
+ {"StatusUpgradeRequired", Const, 7, ""},
+ {"StatusUseProxy", Const, 0, ""},
+ {"StatusVariantAlsoNegotiates", Const, 7, ""},
+ {"StripPrefix", Func, 0, "func(prefix string, h Handler) Handler"},
+ {"TimeFormat", Const, 0, ""},
+ {"TimeoutHandler", Func, 0, "func(h Handler, dt time.Duration, msg string) Handler"},
+ {"TrailerPrefix", Const, 8, ""},
+ {"Transport", Type, 0, ""},
+ {"Transport.Dial", Field, 0, ""},
+ {"Transport.DialContext", Field, 7, ""},
+ {"Transport.DialTLS", Field, 4, ""},
+ {"Transport.DialTLSContext", Field, 14, ""},
+ {"Transport.DisableCompression", Field, 0, ""},
+ {"Transport.DisableKeepAlives", Field, 0, ""},
+ {"Transport.ExpectContinueTimeout", Field, 6, ""},
+ {"Transport.ForceAttemptHTTP2", Field, 13, ""},
+ {"Transport.GetProxyConnectHeader", Field, 16, ""},
+ {"Transport.HTTP2", Field, 24, ""},
+ {"Transport.IdleConnTimeout", Field, 7, ""},
+ {"Transport.MaxConnsPerHost", Field, 11, ""},
+ {"Transport.MaxIdleConns", Field, 7, ""},
+ {"Transport.MaxIdleConnsPerHost", Field, 0, ""},
+ {"Transport.MaxResponseHeaderBytes", Field, 7, ""},
+ {"Transport.OnProxyConnectResponse", Field, 20, ""},
+ {"Transport.Protocols", Field, 24, ""},
+ {"Transport.Proxy", Field, 0, ""},
+ {"Transport.ProxyConnectHeader", Field, 8, ""},
+ {"Transport.ReadBufferSize", Field, 13, ""},
+ {"Transport.ResponseHeaderTimeout", Field, 1, ""},
+ {"Transport.TLSClientConfig", Field, 0, ""},
+ {"Transport.TLSHandshakeTimeout", Field, 3, ""},
+ {"Transport.TLSNextProto", Field, 6, ""},
+ {"Transport.WriteBufferSize", Field, 13, ""},
+ },
+ "net/http/cgi": {
+ {"(*Handler).ServeHTTP", Method, 0, ""},
+ {"Handler", Type, 0, ""},
+ {"Handler.Args", Field, 0, ""},
+ {"Handler.Dir", Field, 0, ""},
+ {"Handler.Env", Field, 0, ""},
+ {"Handler.InheritEnv", Field, 0, ""},
+ {"Handler.Logger", Field, 0, ""},
+ {"Handler.Path", Field, 0, ""},
+ {"Handler.PathLocationHandler", Field, 0, ""},
+ {"Handler.Root", Field, 0, ""},
+ {"Handler.Stderr", Field, 7, ""},
+ {"Request", Func, 0, "func() (*http.Request, error)"},
+ {"RequestFromMap", Func, 0, "func(params map[string]string) (*http.Request, error)"},
+ {"Serve", Func, 0, "func(handler http.Handler) error"},
+ },
+ "net/http/cookiejar": {
+ {"(*Jar).Cookies", Method, 1, ""},
+ {"(*Jar).SetCookies", Method, 1, ""},
+ {"Jar", Type, 1, ""},
+ {"New", Func, 1, "func(o *Options) (*Jar, error)"},
+ {"Options", Type, 1, ""},
+ {"Options.PublicSuffixList", Field, 1, ""},
+ {"PublicSuffixList", Type, 1, ""},
+ },
+ "net/http/fcgi": {
+ {"ErrConnClosed", Var, 5, ""},
+ {"ErrRequestAborted", Var, 5, ""},
+ {"ProcessEnv", Func, 9, "func(r *http.Request) map[string]string"},
+ {"Serve", Func, 0, "func(l net.Listener, handler http.Handler) error"},
+ },
+ "net/http/httptest": {
+ {"(*ResponseRecorder).Flush", Method, 0, ""},
+ {"(*ResponseRecorder).Header", Method, 0, ""},
+ {"(*ResponseRecorder).Result", Method, 7, ""},
+ {"(*ResponseRecorder).Write", Method, 0, ""},
+ {"(*ResponseRecorder).WriteHeader", Method, 0, ""},
+ {"(*ResponseRecorder).WriteString", Method, 6, ""},
+ {"(*Server).Certificate", Method, 9, ""},
+ {"(*Server).Client", Method, 9, ""},
+ {"(*Server).Close", Method, 0, ""},
+ {"(*Server).CloseClientConnections", Method, 0, ""},
+ {"(*Server).Start", Method, 0, ""},
+ {"(*Server).StartTLS", Method, 0, ""},
+ {"DefaultRemoteAddr", Const, 0, ""},
+ {"NewRecorder", Func, 0, "func() *ResponseRecorder"},
+ {"NewRequest", Func, 7, "func(method string, target string, body io.Reader) *http.Request"},
+ {"NewRequestWithContext", Func, 23, "func(ctx context.Context, method string, target string, body io.Reader) *http.Request"},
+ {"NewServer", Func, 0, "func(handler http.Handler) *Server"},
+ {"NewTLSServer", Func, 0, "func(handler http.Handler) *Server"},
+ {"NewUnstartedServer", Func, 0, "func(handler http.Handler) *Server"},
+ {"ResponseRecorder", Type, 0, ""},
+ {"ResponseRecorder.Body", Field, 0, ""},
+ {"ResponseRecorder.Code", Field, 0, ""},
+ {"ResponseRecorder.Flushed", Field, 0, ""},
+ {"ResponseRecorder.HeaderMap", Field, 0, ""},
+ {"Server", Type, 0, ""},
+ {"Server.Config", Field, 0, ""},
+ {"Server.EnableHTTP2", Field, 14, ""},
+ {"Server.Listener", Field, 0, ""},
+ {"Server.TLS", Field, 0, ""},
+ {"Server.URL", Field, 0, ""},
+ },
+ "net/http/httptrace": {
+ {"ClientTrace", Type, 7, ""},
+ {"ClientTrace.ConnectDone", Field, 7, ""},
+ {"ClientTrace.ConnectStart", Field, 7, ""},
+ {"ClientTrace.DNSDone", Field, 7, ""},
+ {"ClientTrace.DNSStart", Field, 7, ""},
+ {"ClientTrace.GetConn", Field, 7, ""},
+ {"ClientTrace.Got100Continue", Field, 7, ""},
+ {"ClientTrace.Got1xxResponse", Field, 11, ""},
+ {"ClientTrace.GotConn", Field, 7, ""},
+ {"ClientTrace.GotFirstResponseByte", Field, 7, ""},
+ {"ClientTrace.PutIdleConn", Field, 7, ""},
+ {"ClientTrace.TLSHandshakeDone", Field, 8, ""},
+ {"ClientTrace.TLSHandshakeStart", Field, 8, ""},
+ {"ClientTrace.Wait100Continue", Field, 7, ""},
+ {"ClientTrace.WroteHeaderField", Field, 11, ""},
+ {"ClientTrace.WroteHeaders", Field, 7, ""},
+ {"ClientTrace.WroteRequest", Field, 7, ""},
+ {"ContextClientTrace", Func, 7, "func(ctx context.Context) *ClientTrace"},
+ {"DNSDoneInfo", Type, 7, ""},
+ {"DNSDoneInfo.Addrs", Field, 7, ""},
+ {"DNSDoneInfo.Coalesced", Field, 7, ""},
+ {"DNSDoneInfo.Err", Field, 7, ""},
+ {"DNSStartInfo", Type, 7, ""},
+ {"DNSStartInfo.Host", Field, 7, ""},
+ {"GotConnInfo", Type, 7, ""},
+ {"GotConnInfo.Conn", Field, 7, ""},
+ {"GotConnInfo.IdleTime", Field, 7, ""},
+ {"GotConnInfo.Reused", Field, 7, ""},
+ {"GotConnInfo.WasIdle", Field, 7, ""},
+ {"WithClientTrace", Func, 7, "func(ctx context.Context, trace *ClientTrace) context.Context"},
+ {"WroteRequestInfo", Type, 7, ""},
+ {"WroteRequestInfo.Err", Field, 7, ""},
+ },
+ "net/http/httputil": {
+ {"(*ClientConn).Close", Method, 0, ""},
+ {"(*ClientConn).Do", Method, 0, ""},
+ {"(*ClientConn).Hijack", Method, 0, ""},
+ {"(*ClientConn).Pending", Method, 0, ""},
+ {"(*ClientConn).Read", Method, 0, ""},
+ {"(*ClientConn).Write", Method, 0, ""},
+ {"(*ProxyRequest).SetURL", Method, 20, ""},
+ {"(*ProxyRequest).SetXForwarded", Method, 20, ""},
+ {"(*ReverseProxy).ServeHTTP", Method, 0, ""},
+ {"(*ServerConn).Close", Method, 0, ""},
+ {"(*ServerConn).Hijack", Method, 0, ""},
+ {"(*ServerConn).Pending", Method, 0, ""},
+ {"(*ServerConn).Read", Method, 0, ""},
+ {"(*ServerConn).Write", Method, 0, ""},
+ {"BufferPool", Type, 6, ""},
+ {"ClientConn", Type, 0, ""},
+ {"DumpRequest", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"},
+ {"DumpRequestOut", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"},
+ {"DumpResponse", Func, 0, "func(resp *http.Response, body bool) ([]byte, error)"},
+ {"ErrClosed", Var, 0, ""},
+ {"ErrLineTooLong", Var, 0, ""},
+ {"ErrPersistEOF", Var, 0, ""},
+ {"ErrPipeline", Var, 0, ""},
+ {"NewChunkedReader", Func, 0, "func(r io.Reader) io.Reader"},
+ {"NewChunkedWriter", Func, 0, "func(w io.Writer) io.WriteCloser"},
+ {"NewClientConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ClientConn"},
+ {"NewProxyClientConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ClientConn"},
+ {"NewServerConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ServerConn"},
+ {"NewSingleHostReverseProxy", Func, 0, "func(target *url.URL) *ReverseProxy"},
+ {"ProxyRequest", Type, 20, ""},
+ {"ProxyRequest.In", Field, 20, ""},
+ {"ProxyRequest.Out", Field, 20, ""},
+ {"ReverseProxy", Type, 0, ""},
+ {"ReverseProxy.BufferPool", Field, 6, ""},
+ {"ReverseProxy.Director", Field, 0, ""},
+ {"ReverseProxy.ErrorHandler", Field, 11, ""},
+ {"ReverseProxy.ErrorLog", Field, 4, ""},
+ {"ReverseProxy.FlushInterval", Field, 0, ""},
+ {"ReverseProxy.ModifyResponse", Field, 8, ""},
+ {"ReverseProxy.Rewrite", Field, 20, ""},
+ {"ReverseProxy.Transport", Field, 0, ""},
+ {"ServerConn", Type, 0, ""},
+ },
+ "net/http/pprof": {
+ {"Cmdline", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
+ {"Handler", Func, 0, "func(name string) http.Handler"},
+ {"Index", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
+ {"Profile", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
+ {"Symbol", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
+ {"Trace", Func, 5, "func(w http.ResponseWriter, r *http.Request)"},
+ },
+ "net/mail": {
+ {"(*Address).String", Method, 0, ""},
+ {"(*AddressParser).Parse", Method, 5, ""},
+ {"(*AddressParser).ParseList", Method, 5, ""},
+ {"(Header).AddressList", Method, 0, ""},
+ {"(Header).Date", Method, 0, ""},
+ {"(Header).Get", Method, 0, ""},
+ {"Address", Type, 0, ""},
+ {"Address.Address", Field, 0, ""},
+ {"Address.Name", Field, 0, ""},
+ {"AddressParser", Type, 5, ""},
+ {"AddressParser.WordDecoder", Field, 5, ""},
+ {"ErrHeaderNotPresent", Var, 0, ""},
+ {"Header", Type, 0, ""},
+ {"Message", Type, 0, ""},
+ {"Message.Body", Field, 0, ""},
+ {"Message.Header", Field, 0, ""},
+ {"ParseAddress", Func, 1, "func(address string) (*Address, error)"},
+ {"ParseAddressList", Func, 1, "func(list string) ([]*Address, error)"},
+ {"ParseDate", Func, 8, "func(date string) (time.Time, error)"},
+ {"ReadMessage", Func, 0, "func(r io.Reader) (msg *Message, err error)"},
+ },
+ "net/netip": {
+ {"(*Addr).UnmarshalBinary", Method, 18, ""},
+ {"(*Addr).UnmarshalText", Method, 18, ""},
+ {"(*AddrPort).UnmarshalBinary", Method, 18, ""},
+ {"(*AddrPort).UnmarshalText", Method, 18, ""},
+ {"(*Prefix).UnmarshalBinary", Method, 18, ""},
+ {"(*Prefix).UnmarshalText", Method, 18, ""},
+ {"(Addr).AppendBinary", Method, 24, ""},
+ {"(Addr).AppendText", Method, 24, ""},
+ {"(Addr).AppendTo", Method, 18, ""},
+ {"(Addr).As16", Method, 18, ""},
+ {"(Addr).As4", Method, 18, ""},
+ {"(Addr).AsSlice", Method, 18, ""},
+ {"(Addr).BitLen", Method, 18, ""},
+ {"(Addr).Compare", Method, 18, ""},
+ {"(Addr).Is4", Method, 18, ""},
+ {"(Addr).Is4In6", Method, 18, ""},
+ {"(Addr).Is6", Method, 18, ""},
+ {"(Addr).IsGlobalUnicast", Method, 18, ""},
+ {"(Addr).IsInterfaceLocalMulticast", Method, 18, ""},
+ {"(Addr).IsLinkLocalMulticast", Method, 18, ""},
+ {"(Addr).IsLinkLocalUnicast", Method, 18, ""},
+ {"(Addr).IsLoopback", Method, 18, ""},
+ {"(Addr).IsMulticast", Method, 18, ""},
+ {"(Addr).IsPrivate", Method, 18, ""},
+ {"(Addr).IsUnspecified", Method, 18, ""},
+ {"(Addr).IsValid", Method, 18, ""},
+ {"(Addr).Less", Method, 18, ""},
+ {"(Addr).MarshalBinary", Method, 18, ""},
+ {"(Addr).MarshalText", Method, 18, ""},
+ {"(Addr).Next", Method, 18, ""},
+ {"(Addr).Prefix", Method, 18, ""},
+ {"(Addr).Prev", Method, 18, ""},
+ {"(Addr).String", Method, 18, ""},
+ {"(Addr).StringExpanded", Method, 18, ""},
+ {"(Addr).Unmap", Method, 18, ""},
+ {"(Addr).WithZone", Method, 18, ""},
+ {"(Addr).Zone", Method, 18, ""},
+ {"(AddrPort).Addr", Method, 18, ""},
+ {"(AddrPort).AppendBinary", Method, 24, ""},
+ {"(AddrPort).AppendText", Method, 24, ""},
+ {"(AddrPort).AppendTo", Method, 18, ""},
+ {"(AddrPort).Compare", Method, 22, ""},
+ {"(AddrPort).IsValid", Method, 18, ""},
+ {"(AddrPort).MarshalBinary", Method, 18, ""},
+ {"(AddrPort).MarshalText", Method, 18, ""},
+ {"(AddrPort).Port", Method, 18, ""},
+ {"(AddrPort).String", Method, 18, ""},
+ {"(Prefix).Addr", Method, 18, ""},
+ {"(Prefix).AppendBinary", Method, 24, ""},
+ {"(Prefix).AppendText", Method, 24, ""},
+ {"(Prefix).AppendTo", Method, 18, ""},
+ {"(Prefix).Bits", Method, 18, ""},
+ {"(Prefix).Contains", Method, 18, ""},
+ {"(Prefix).IsSingleIP", Method, 18, ""},
+ {"(Prefix).IsValid", Method, 18, ""},
+ {"(Prefix).MarshalBinary", Method, 18, ""},
+ {"(Prefix).MarshalText", Method, 18, ""},
+ {"(Prefix).Masked", Method, 18, ""},
+ {"(Prefix).Overlaps", Method, 18, ""},
+ {"(Prefix).String", Method, 18, ""},
+ {"Addr", Type, 18, ""},
+ {"AddrFrom16", Func, 18, "func(addr [16]byte) Addr"},
+ {"AddrFrom4", Func, 18, "func(addr [4]byte) Addr"},
+ {"AddrFromSlice", Func, 18, "func(slice []byte) (ip Addr, ok bool)"},
+ {"AddrPort", Type, 18, ""},
+ {"AddrPortFrom", Func, 18, "func(ip Addr, port uint16) AddrPort"},
+ {"IPv4Unspecified", Func, 18, "func() Addr"},
+ {"IPv6LinkLocalAllNodes", Func, 18, "func() Addr"},
+ {"IPv6LinkLocalAllRouters", Func, 20, "func() Addr"},
+ {"IPv6Loopback", Func, 20, "func() Addr"},
+ {"IPv6Unspecified", Func, 18, "func() Addr"},
+ {"MustParseAddr", Func, 18, "func(s string) Addr"},
+ {"MustParseAddrPort", Func, 18, "func(s string) AddrPort"},
+ {"MustParsePrefix", Func, 18, "func(s string) Prefix"},
+ {"ParseAddr", Func, 18, "func(s string) (Addr, error)"},
+ {"ParseAddrPort", Func, 18, "func(s string) (AddrPort, error)"},
+ {"ParsePrefix", Func, 18, "func(s string) (Prefix, error)"},
+ {"Prefix", Type, 18, ""},
+ {"PrefixFrom", Func, 18, "func(ip Addr, bits int) Prefix"},
+ },
+ "net/rpc": {
+ {"(*Client).Call", Method, 0, ""},
+ {"(*Client).Close", Method, 0, ""},
+ {"(*Client).Go", Method, 0, ""},
+ {"(*Server).Accept", Method, 0, ""},
+ {"(*Server).HandleHTTP", Method, 0, ""},
+ {"(*Server).Register", Method, 0, ""},
+ {"(*Server).RegisterName", Method, 0, ""},
+ {"(*Server).ServeCodec", Method, 0, ""},
+ {"(*Server).ServeConn", Method, 0, ""},
+ {"(*Server).ServeHTTP", Method, 0, ""},
+ {"(*Server).ServeRequest", Method, 0, ""},
+ {"(ServerError).Error", Method, 0, ""},
+ {"Accept", Func, 0, "func(lis net.Listener)"},
+ {"Call", Type, 0, ""},
+ {"Call.Args", Field, 0, ""},
+ {"Call.Done", Field, 0, ""},
+ {"Call.Error", Field, 0, ""},
+ {"Call.Reply", Field, 0, ""},
+ {"Call.ServiceMethod", Field, 0, ""},
+ {"Client", Type, 0, ""},
+ {"ClientCodec", Type, 0, ""},
+ {"DefaultDebugPath", Const, 0, ""},
+ {"DefaultRPCPath", Const, 0, ""},
+ {"DefaultServer", Var, 0, ""},
+ {"Dial", Func, 0, "func(network string, address string) (*Client, error)"},
+ {"DialHTTP", Func, 0, "func(network string, address string) (*Client, error)"},
+ {"DialHTTPPath", Func, 0, "func(network string, address string, path string) (*Client, error)"},
+ {"ErrShutdown", Var, 0, ""},
+ {"HandleHTTP", Func, 0, "func()"},
+ {"NewClient", Func, 0, "func(conn io.ReadWriteCloser) *Client"},
+ {"NewClientWithCodec", Func, 0, "func(codec ClientCodec) *Client"},
+ {"NewServer", Func, 0, "func() *Server"},
+ {"Register", Func, 0, "func(rcvr any) error"},
+ {"RegisterName", Func, 0, "func(name string, rcvr any) error"},
+ {"Request", Type, 0, ""},
+ {"Request.Seq", Field, 0, ""},
+ {"Request.ServiceMethod", Field, 0, ""},
+ {"Response", Type, 0, ""},
+ {"Response.Error", Field, 0, ""},
+ {"Response.Seq", Field, 0, ""},
+ {"Response.ServiceMethod", Field, 0, ""},
+ {"ServeCodec", Func, 0, "func(codec ServerCodec)"},
+ {"ServeConn", Func, 0, "func(conn io.ReadWriteCloser)"},
+ {"ServeRequest", Func, 0, "func(codec ServerCodec) error"},
+ {"Server", Type, 0, ""},
+ {"ServerCodec", Type, 0, ""},
+ {"ServerError", Type, 0, ""},
+ },
+ "net/rpc/jsonrpc": {
+ {"Dial", Func, 0, "func(network string, address string) (*rpc.Client, error)"},
+ {"NewClient", Func, 0, "func(conn io.ReadWriteCloser) *rpc.Client"},
+ {"NewClientCodec", Func, 0, "func(conn io.ReadWriteCloser) rpc.ClientCodec"},
+ {"NewServerCodec", Func, 0, "func(conn io.ReadWriteCloser) rpc.ServerCodec"},
+ {"ServeConn", Func, 0, "func(conn io.ReadWriteCloser)"},
+ },
+ "net/smtp": {
+ {"(*Client).Auth", Method, 0, ""},
+ {"(*Client).Close", Method, 2, ""},
+ {"(*Client).Data", Method, 0, ""},
+ {"(*Client).Extension", Method, 0, ""},
+ {"(*Client).Hello", Method, 1, ""},
+ {"(*Client).Mail", Method, 0, ""},
+ {"(*Client).Noop", Method, 10, ""},
+ {"(*Client).Quit", Method, 0, ""},
+ {"(*Client).Rcpt", Method, 0, ""},
+ {"(*Client).Reset", Method, 0, ""},
+ {"(*Client).StartTLS", Method, 0, ""},
+ {"(*Client).TLSConnectionState", Method, 5, ""},
+ {"(*Client).Verify", Method, 0, ""},
+ {"Auth", Type, 0, ""},
+ {"CRAMMD5Auth", Func, 0, "func(username string, secret string) Auth"},
+ {"Client", Type, 0, ""},
+ {"Client.Text", Field, 0, ""},
+ {"Dial", Func, 0, "func(addr string) (*Client, error)"},
+ {"NewClient", Func, 0, "func(conn net.Conn, host string) (*Client, error)"},
+ {"PlainAuth", Func, 0, "func(identity string, username string, password string, host string) Auth"},
+ {"SendMail", Func, 0, "func(addr string, a Auth, from string, to []string, msg []byte) error"},
+ {"ServerInfo", Type, 0, ""},
+ {"ServerInfo.Auth", Field, 0, ""},
+ {"ServerInfo.Name", Field, 0, ""},
+ {"ServerInfo.TLS", Field, 0, ""},
+ },
+ "net/textproto": {
+ {"(*Conn).Close", Method, 0, ""},
+ {"(*Conn).Cmd", Method, 0, ""},
+ {"(*Conn).DotReader", Method, 0, ""},
+ {"(*Conn).DotWriter", Method, 0, ""},
+ {"(*Conn).EndRequest", Method, 0, ""},
+ {"(*Conn).EndResponse", Method, 0, ""},
+ {"(*Conn).Next", Method, 0, ""},
+ {"(*Conn).PrintfLine", Method, 0, ""},
+ {"(*Conn).ReadCodeLine", Method, 0, ""},
+ {"(*Conn).ReadContinuedLine", Method, 0, ""},
+ {"(*Conn).ReadContinuedLineBytes", Method, 0, ""},
+ {"(*Conn).ReadDotBytes", Method, 0, ""},
+ {"(*Conn).ReadDotLines", Method, 0, ""},
+ {"(*Conn).ReadLine", Method, 0, ""},
+ {"(*Conn).ReadLineBytes", Method, 0, ""},
+ {"(*Conn).ReadMIMEHeader", Method, 0, ""},
+ {"(*Conn).ReadResponse", Method, 0, ""},
+ {"(*Conn).StartRequest", Method, 0, ""},
+ {"(*Conn).StartResponse", Method, 0, ""},
+ {"(*Error).Error", Method, 0, ""},
+ {"(*Pipeline).EndRequest", Method, 0, ""},
+ {"(*Pipeline).EndResponse", Method, 0, ""},
+ {"(*Pipeline).Next", Method, 0, ""},
+ {"(*Pipeline).StartRequest", Method, 0, ""},
+ {"(*Pipeline).StartResponse", Method, 0, ""},
+ {"(*Reader).DotReader", Method, 0, ""},
+ {"(*Reader).ReadCodeLine", Method, 0, ""},
+ {"(*Reader).ReadContinuedLine", Method, 0, ""},
+ {"(*Reader).ReadContinuedLineBytes", Method, 0, ""},
+ {"(*Reader).ReadDotBytes", Method, 0, ""},
+ {"(*Reader).ReadDotLines", Method, 0, ""},
+ {"(*Reader).ReadLine", Method, 0, ""},
+ {"(*Reader).ReadLineBytes", Method, 0, ""},
+ {"(*Reader).ReadMIMEHeader", Method, 0, ""},
+ {"(*Reader).ReadResponse", Method, 0, ""},
+ {"(*Writer).DotWriter", Method, 0, ""},
+ {"(*Writer).PrintfLine", Method, 0, ""},
+ {"(MIMEHeader).Add", Method, 0, ""},
+ {"(MIMEHeader).Del", Method, 0, ""},
+ {"(MIMEHeader).Get", Method, 0, ""},
+ {"(MIMEHeader).Set", Method, 0, ""},
+ {"(MIMEHeader).Values", Method, 14, ""},
+ {"(ProtocolError).Error", Method, 0, ""},
+ {"CanonicalMIMEHeaderKey", Func, 0, "func(s string) string"},
+ {"Conn", Type, 0, ""},
+ {"Conn.Pipeline", Field, 0, ""},
+ {"Conn.Reader", Field, 0, ""},
+ {"Conn.Writer", Field, 0, ""},
+ {"Dial", Func, 0, "func(network string, addr string) (*Conn, error)"},
+ {"Error", Type, 0, ""},
+ {"Error.Code", Field, 0, ""},
+ {"Error.Msg", Field, 0, ""},
+ {"MIMEHeader", Type, 0, ""},
+ {"NewConn", Func, 0, "func(conn io.ReadWriteCloser) *Conn"},
+ {"NewReader", Func, 0, "func(r *bufio.Reader) *Reader"},
+ {"NewWriter", Func, 0, "func(w *bufio.Writer) *Writer"},
+ {"Pipeline", Type, 0, ""},
+ {"ProtocolError", Type, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"Reader.R", Field, 0, ""},
+ {"TrimBytes", Func, 1, "func(b []byte) []byte"},
+ {"TrimString", Func, 1, "func(s string) string"},
+ {"Writer", Type, 0, ""},
+ {"Writer.W", Field, 0, ""},
+ },
+ "net/url": {
+ {"(*Error).Error", Method, 0, ""},
+ {"(*Error).Temporary", Method, 6, ""},
+ {"(*Error).Timeout", Method, 6, ""},
+ {"(*Error).Unwrap", Method, 13, ""},
+ {"(*URL).AppendBinary", Method, 24, ""},
+ {"(*URL).EscapedFragment", Method, 15, ""},
+ {"(*URL).EscapedPath", Method, 5, ""},
+ {"(*URL).Hostname", Method, 8, ""},
+ {"(*URL).IsAbs", Method, 0, ""},
+ {"(*URL).JoinPath", Method, 19, ""},
+ {"(*URL).MarshalBinary", Method, 8, ""},
+ {"(*URL).Parse", Method, 0, ""},
+ {"(*URL).Port", Method, 8, ""},
+ {"(*URL).Query", Method, 0, ""},
+ {"(*URL).Redacted", Method, 15, ""},
+ {"(*URL).RequestURI", Method, 0, ""},
+ {"(*URL).ResolveReference", Method, 0, ""},
+ {"(*URL).String", Method, 0, ""},
+ {"(*URL).UnmarshalBinary", Method, 8, ""},
+ {"(*Userinfo).Password", Method, 0, ""},
+ {"(*Userinfo).String", Method, 0, ""},
+ {"(*Userinfo).Username", Method, 0, ""},
+ {"(EscapeError).Error", Method, 0, ""},
+ {"(InvalidHostError).Error", Method, 6, ""},
+ {"(Values).Add", Method, 0, ""},
+ {"(Values).Del", Method, 0, ""},
+ {"(Values).Encode", Method, 0, ""},
+ {"(Values).Get", Method, 0, ""},
+ {"(Values).Has", Method, 17, ""},
+ {"(Values).Set", Method, 0, ""},
+ {"Error", Type, 0, ""},
+ {"Error.Err", Field, 0, ""},
+ {"Error.Op", Field, 0, ""},
+ {"Error.URL", Field, 0, ""},
+ {"EscapeError", Type, 0, ""},
+ {"InvalidHostError", Type, 6, ""},
+ {"JoinPath", Func, 19, "func(base string, elem ...string) (result string, err error)"},
+ {"Parse", Func, 0, "func(rawURL string) (*URL, error)"},
+ {"ParseQuery", Func, 0, "func(query string) (Values, error)"},
+ {"ParseRequestURI", Func, 0, "func(rawURL string) (*URL, error)"},
+ {"PathEscape", Func, 8, "func(s string) string"},
+ {"PathUnescape", Func, 8, "func(s string) (string, error)"},
+ {"QueryEscape", Func, 0, "func(s string) string"},
+ {"QueryUnescape", Func, 0, "func(s string) (string, error)"},
+ {"URL", Type, 0, ""},
+ {"URL.ForceQuery", Field, 7, ""},
+ {"URL.Fragment", Field, 0, ""},
+ {"URL.Host", Field, 0, ""},
+ {"URL.OmitHost", Field, 19, ""},
+ {"URL.Opaque", Field, 0, ""},
+ {"URL.Path", Field, 0, ""},
+ {"URL.RawFragment", Field, 15, ""},
+ {"URL.RawPath", Field, 5, ""},
+ {"URL.RawQuery", Field, 0, ""},
+ {"URL.Scheme", Field, 0, ""},
+ {"URL.User", Field, 0, ""},
+ {"User", Func, 0, "func(username string) *Userinfo"},
+ {"UserPassword", Func, 0, "func(username string, password string) *Userinfo"},
+ {"Userinfo", Type, 0, ""},
+ {"Values", Type, 0, ""},
+ },
+ "os": {
+ {"(*File).Chdir", Method, 0, ""},
+ {"(*File).Chmod", Method, 0, ""},
+ {"(*File).Chown", Method, 0, ""},
+ {"(*File).Close", Method, 0, ""},
+ {"(*File).Fd", Method, 0, ""},
+ {"(*File).Name", Method, 0, ""},
+ {"(*File).Read", Method, 0, ""},
+ {"(*File).ReadAt", Method, 0, ""},
+ {"(*File).ReadDir", Method, 16, ""},
+ {"(*File).ReadFrom", Method, 15, ""},
+ {"(*File).Readdir", Method, 0, ""},
+ {"(*File).Readdirnames", Method, 0, ""},
+ {"(*File).Seek", Method, 0, ""},
+ {"(*File).SetDeadline", Method, 10, ""},
+ {"(*File).SetReadDeadline", Method, 10, ""},
+ {"(*File).SetWriteDeadline", Method, 10, ""},
+ {"(*File).Stat", Method, 0, ""},
+ {"(*File).Sync", Method, 0, ""},
+ {"(*File).SyscallConn", Method, 12, ""},
+ {"(*File).Truncate", Method, 0, ""},
+ {"(*File).Write", Method, 0, ""},
+ {"(*File).WriteAt", Method, 0, ""},
+ {"(*File).WriteString", Method, 0, ""},
+ {"(*File).WriteTo", Method, 22, ""},
+ {"(*LinkError).Error", Method, 0, ""},
+ {"(*LinkError).Unwrap", Method, 13, ""},
+ {"(*PathError).Error", Method, 0, ""},
+ {"(*PathError).Timeout", Method, 10, ""},
+ {"(*PathError).Unwrap", Method, 13, ""},
+ {"(*Process).Kill", Method, 0, ""},
+ {"(*Process).Release", Method, 0, ""},
+ {"(*Process).Signal", Method, 0, ""},
+ {"(*Process).Wait", Method, 0, ""},
+ {"(*ProcessState).ExitCode", Method, 12, ""},
+ {"(*ProcessState).Exited", Method, 0, ""},
+ {"(*ProcessState).Pid", Method, 0, ""},
+ {"(*ProcessState).String", Method, 0, ""},
+ {"(*ProcessState).Success", Method, 0, ""},
+ {"(*ProcessState).Sys", Method, 0, ""},
+ {"(*ProcessState).SysUsage", Method, 0, ""},
+ {"(*ProcessState).SystemTime", Method, 0, ""},
+ {"(*ProcessState).UserTime", Method, 0, ""},
+ {"(*Root).Chmod", Method, 25, ""},
+ {"(*Root).Chown", Method, 25, ""},
+ {"(*Root).Chtimes", Method, 25, ""},
+ {"(*Root).Close", Method, 24, ""},
+ {"(*Root).Create", Method, 24, ""},
+ {"(*Root).FS", Method, 24, ""},
+ {"(*Root).Lchown", Method, 25, ""},
+ {"(*Root).Link", Method, 25, ""},
+ {"(*Root).Lstat", Method, 24, ""},
+ {"(*Root).Mkdir", Method, 24, ""},
+ {"(*Root).MkdirAll", Method, 25, ""},
+ {"(*Root).Name", Method, 24, ""},
+ {"(*Root).Open", Method, 24, ""},
+ {"(*Root).OpenFile", Method, 24, ""},
+ {"(*Root).OpenRoot", Method, 24, ""},
+ {"(*Root).ReadFile", Method, 25, ""},
+ {"(*Root).Readlink", Method, 25, ""},
+ {"(*Root).Remove", Method, 24, ""},
+ {"(*Root).RemoveAll", Method, 25, ""},
+ {"(*Root).Rename", Method, 25, ""},
+ {"(*Root).Stat", Method, 24, ""},
+ {"(*Root).Symlink", Method, 25, ""},
+ {"(*Root).WriteFile", Method, 25, ""},
+ {"(*SyscallError).Error", Method, 0, ""},
+ {"(*SyscallError).Timeout", Method, 10, ""},
+ {"(*SyscallError).Unwrap", Method, 13, ""},
+ {"(FileMode).IsDir", Method, 0, ""},
+ {"(FileMode).IsRegular", Method, 1, ""},
+ {"(FileMode).Perm", Method, 0, ""},
+ {"(FileMode).String", Method, 0, ""},
+ {"Args", Var, 0, ""},
+ {"Chdir", Func, 0, "func(dir string) error"},
+ {"Chmod", Func, 0, "func(name string, mode FileMode) error"},
+ {"Chown", Func, 0, "func(name string, uid int, gid int) error"},
+ {"Chtimes", Func, 0, "func(name string, atime time.Time, mtime time.Time) error"},
+ {"Clearenv", Func, 0, "func()"},
+ {"CopyFS", Func, 23, "func(dir string, fsys fs.FS) error"},
+ {"Create", Func, 0, "func(name string) (*File, error)"},
+ {"CreateTemp", Func, 16, "func(dir string, pattern string) (*File, error)"},
+ {"DevNull", Const, 0, ""},
+ {"DirEntry", Type, 16, ""},
+ {"DirFS", Func, 16, "func(dir string) fs.FS"},
+ {"Environ", Func, 0, "func() []string"},
+ {"ErrClosed", Var, 8, ""},
+ {"ErrDeadlineExceeded", Var, 15, ""},
+ {"ErrExist", Var, 0, ""},
+ {"ErrInvalid", Var, 0, ""},
+ {"ErrNoDeadline", Var, 10, ""},
+ {"ErrNotExist", Var, 0, ""},
+ {"ErrPermission", Var, 0, ""},
+ {"ErrProcessDone", Var, 16, ""},
+ {"Executable", Func, 8, "func() (string, error)"},
+ {"Exit", Func, 0, "func(code int)"},
+ {"Expand", Func, 0, "func(s string, mapping func(string) string) string"},
+ {"ExpandEnv", Func, 0, "func(s string) string"},
+ {"File", Type, 0, ""},
+ {"FileInfo", Type, 0, ""},
+ {"FileMode", Type, 0, ""},
+ {"FindProcess", Func, 0, "func(pid int) (*Process, error)"},
+ {"Getegid", Func, 0, "func() int"},
+ {"Getenv", Func, 0, "func(key string) string"},
+ {"Geteuid", Func, 0, "func() int"},
+ {"Getgid", Func, 0, "func() int"},
+ {"Getgroups", Func, 0, "func() ([]int, error)"},
+ {"Getpagesize", Func, 0, "func() int"},
+ {"Getpid", Func, 0, "func() int"},
+ {"Getppid", Func, 0, "func() int"},
+ {"Getuid", Func, 0, "func() int"},
+ {"Getwd", Func, 0, "func() (dir string, err error)"},
+ {"Hostname", Func, 0, "func() (name string, err error)"},
+ {"Interrupt", Var, 0, ""},
+ {"IsExist", Func, 0, "func(err error) bool"},
+ {"IsNotExist", Func, 0, "func(err error) bool"},
+ {"IsPathSeparator", Func, 0, "func(c uint8) bool"},
+ {"IsPermission", Func, 0, "func(err error) bool"},
+ {"IsTimeout", Func, 10, "func(err error) bool"},
+ {"Kill", Var, 0, ""},
+ {"Lchown", Func, 0, "func(name string, uid int, gid int) error"},
+ {"Link", Func, 0, "func(oldname string, newname string) error"},
+ {"LinkError", Type, 0, ""},
+ {"LinkError.Err", Field, 0, ""},
+ {"LinkError.New", Field, 0, ""},
+ {"LinkError.Old", Field, 0, ""},
+ {"LinkError.Op", Field, 0, ""},
+ {"LookupEnv", Func, 5, "func(key string) (string, bool)"},
+ {"Lstat", Func, 0, "func(name string) (FileInfo, error)"},
+ {"Mkdir", Func, 0, "func(name string, perm FileMode) error"},
+ {"MkdirAll", Func, 0, "func(path string, perm FileMode) error"},
+ {"MkdirTemp", Func, 16, "func(dir string, pattern string) (string, error)"},
+ {"ModeAppend", Const, 0, ""},
+ {"ModeCharDevice", Const, 0, ""},
+ {"ModeDevice", Const, 0, ""},
+ {"ModeDir", Const, 0, ""},
+ {"ModeExclusive", Const, 0, ""},
+ {"ModeIrregular", Const, 11, ""},
+ {"ModeNamedPipe", Const, 0, ""},
+ {"ModePerm", Const, 0, ""},
+ {"ModeSetgid", Const, 0, ""},
+ {"ModeSetuid", Const, 0, ""},
+ {"ModeSocket", Const, 0, ""},
+ {"ModeSticky", Const, 0, ""},
+ {"ModeSymlink", Const, 0, ""},
+ {"ModeTemporary", Const, 0, ""},
+ {"ModeType", Const, 0, ""},
+ {"NewFile", Func, 0, "func(fd uintptr, name string) *File"},
+ {"NewSyscallError", Func, 0, "func(syscall string, err error) error"},
+ {"O_APPEND", Const, 0, ""},
+ {"O_CREATE", Const, 0, ""},
+ {"O_EXCL", Const, 0, ""},
+ {"O_RDONLY", Const, 0, ""},
+ {"O_RDWR", Const, 0, ""},
+ {"O_SYNC", Const, 0, ""},
+ {"O_TRUNC", Const, 0, ""},
+ {"O_WRONLY", Const, 0, ""},
+ {"Open", Func, 0, "func(name string) (*File, error)"},
+ {"OpenFile", Func, 0, "func(name string, flag int, perm FileMode) (*File, error)"},
+ {"OpenInRoot", Func, 24, "func(dir string, name string) (*File, error)"},
+ {"OpenRoot", Func, 24, "func(name string) (*Root, error)"},
+ {"PathError", Type, 0, ""},
+ {"PathError.Err", Field, 0, ""},
+ {"PathError.Op", Field, 0, ""},
+ {"PathError.Path", Field, 0, ""},
+ {"PathListSeparator", Const, 0, ""},
+ {"PathSeparator", Const, 0, ""},
+ {"Pipe", Func, 0, "func() (r *File, w *File, err error)"},
+ {"ProcAttr", Type, 0, ""},
+ {"ProcAttr.Dir", Field, 0, ""},
+ {"ProcAttr.Env", Field, 0, ""},
+ {"ProcAttr.Files", Field, 0, ""},
+ {"ProcAttr.Sys", Field, 0, ""},
+ {"Process", Type, 0, ""},
+ {"Process.Pid", Field, 0, ""},
+ {"ProcessState", Type, 0, ""},
+ {"ReadDir", Func, 16, "func(name string) ([]DirEntry, error)"},
+ {"ReadFile", Func, 16, "func(name string) ([]byte, error)"},
+ {"Readlink", Func, 0, "func(name string) (string, error)"},
+ {"Remove", Func, 0, "func(name string) error"},
+ {"RemoveAll", Func, 0, "func(path string) error"},
+ {"Rename", Func, 0, "func(oldpath string, newpath string) error"},
+ {"Root", Type, 24, ""},
+ {"SEEK_CUR", Const, 0, ""},
+ {"SEEK_END", Const, 0, ""},
+ {"SEEK_SET", Const, 0, ""},
+ {"SameFile", Func, 0, "func(fi1 FileInfo, fi2 FileInfo) bool"},
+ {"Setenv", Func, 0, "func(key string, value string) error"},
+ {"Signal", Type, 0, ""},
+ {"StartProcess", Func, 0, "func(name string, argv []string, attr *ProcAttr) (*Process, error)"},
+ {"Stat", Func, 0, "func(name string) (FileInfo, error)"},
+ {"Stderr", Var, 0, ""},
+ {"Stdin", Var, 0, ""},
+ {"Stdout", Var, 0, ""},
+ {"Symlink", Func, 0, "func(oldname string, newname string) error"},
+ {"SyscallError", Type, 0, ""},
+ {"SyscallError.Err", Field, 0, ""},
+ {"SyscallError.Syscall", Field, 0, ""},
+ {"TempDir", Func, 0, "func() string"},
+ {"Truncate", Func, 0, "func(name string, size int64) error"},
+ {"Unsetenv", Func, 4, "func(key string) error"},
+ {"UserCacheDir", Func, 11, "func() (string, error)"},
+ {"UserConfigDir", Func, 13, "func() (string, error)"},
+ {"UserHomeDir", Func, 12, "func() (string, error)"},
+ {"WriteFile", Func, 16, "func(name string, data []byte, perm FileMode) error"},
+ },
+ "os/exec": {
+ {"(*Cmd).CombinedOutput", Method, 0, ""},
+ {"(*Cmd).Environ", Method, 19, ""},
+ {"(*Cmd).Output", Method, 0, ""},
+ {"(*Cmd).Run", Method, 0, ""},
+ {"(*Cmd).Start", Method, 0, ""},
+ {"(*Cmd).StderrPipe", Method, 0, ""},
+ {"(*Cmd).StdinPipe", Method, 0, ""},
+ {"(*Cmd).StdoutPipe", Method, 0, ""},
+ {"(*Cmd).String", Method, 13, ""},
+ {"(*Cmd).Wait", Method, 0, ""},
+ {"(*Error).Error", Method, 0, ""},
+ {"(*Error).Unwrap", Method, 13, ""},
+ {"(*ExitError).Error", Method, 0, ""},
+ {"(ExitError).ExitCode", Method, 12, ""},
+ {"(ExitError).Exited", Method, 0, ""},
+ {"(ExitError).Pid", Method, 0, ""},
+ {"(ExitError).String", Method, 0, ""},
+ {"(ExitError).Success", Method, 0, ""},
+ {"(ExitError).Sys", Method, 0, ""},
+ {"(ExitError).SysUsage", Method, 0, ""},
+ {"(ExitError).SystemTime", Method, 0, ""},
+ {"(ExitError).UserTime", Method, 0, ""},
+ {"Cmd", Type, 0, ""},
+ {"Cmd.Args", Field, 0, ""},
+ {"Cmd.Cancel", Field, 20, ""},
+ {"Cmd.Dir", Field, 0, ""},
+ {"Cmd.Env", Field, 0, ""},
+ {"Cmd.Err", Field, 19, ""},
+ {"Cmd.ExtraFiles", Field, 0, ""},
+ {"Cmd.Path", Field, 0, ""},
+ {"Cmd.Process", Field, 0, ""},
+ {"Cmd.ProcessState", Field, 0, ""},
+ {"Cmd.Stderr", Field, 0, ""},
+ {"Cmd.Stdin", Field, 0, ""},
+ {"Cmd.Stdout", Field, 0, ""},
+ {"Cmd.SysProcAttr", Field, 0, ""},
+ {"Cmd.WaitDelay", Field, 20, ""},
+ {"Command", Func, 0, "func(name string, arg ...string) *Cmd"},
+ {"CommandContext", Func, 7, "func(ctx context.Context, name string, arg ...string) *Cmd"},
+ {"ErrDot", Var, 19, ""},
+ {"ErrNotFound", Var, 0, ""},
+ {"ErrWaitDelay", Var, 20, ""},
+ {"Error", Type, 0, ""},
+ {"Error.Err", Field, 0, ""},
+ {"Error.Name", Field, 0, ""},
+ {"ExitError", Type, 0, ""},
+ {"ExitError.ProcessState", Field, 0, ""},
+ {"ExitError.Stderr", Field, 6, ""},
+ {"LookPath", Func, 0, "func(file string) (string, error)"},
+ },
+ "os/signal": {
+ {"Ignore", Func, 5, "func(sig ...os.Signal)"},
+ {"Ignored", Func, 11, "func(sig os.Signal) bool"},
+ {"Notify", Func, 0, "func(c chan<- os.Signal, sig ...os.Signal)"},
+ {"NotifyContext", Func, 16, "func(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc)"},
+ {"Reset", Func, 5, "func(sig ...os.Signal)"},
+ {"Stop", Func, 1, "func(c chan<- os.Signal)"},
+ },
+ "os/user": {
+ {"(*User).GroupIds", Method, 7, ""},
+ {"(UnknownGroupError).Error", Method, 7, ""},
+ {"(UnknownGroupIdError).Error", Method, 7, ""},
+ {"(UnknownUserError).Error", Method, 0, ""},
+ {"(UnknownUserIdError).Error", Method, 0, ""},
+ {"Current", Func, 0, "func() (*User, error)"},
+ {"Group", Type, 7, ""},
+ {"Group.Gid", Field, 7, ""},
+ {"Group.Name", Field, 7, ""},
+ {"Lookup", Func, 0, "func(username string) (*User, error)"},
+ {"LookupGroup", Func, 7, "func(name string) (*Group, error)"},
+ {"LookupGroupId", Func, 7, "func(gid string) (*Group, error)"},
+ {"LookupId", Func, 0, "func(uid string) (*User, error)"},
+ {"UnknownGroupError", Type, 7, ""},
+ {"UnknownGroupIdError", Type, 7, ""},
+ {"UnknownUserError", Type, 0, ""},
+ {"UnknownUserIdError", Type, 0, ""},
+ {"User", Type, 0, ""},
+ {"User.Gid", Field, 0, ""},
+ {"User.HomeDir", Field, 0, ""},
+ {"User.Name", Field, 0, ""},
+ {"User.Uid", Field, 0, ""},
+ {"User.Username", Field, 0, ""},
+ },
+ "path": {
+ {"Base", Func, 0, "func(path string) string"},
+ {"Clean", Func, 0, "func(path string) string"},
+ {"Dir", Func, 0, "func(path string) string"},
+ {"ErrBadPattern", Var, 0, ""},
+ {"Ext", Func, 0, "func(path string) string"},
+ {"IsAbs", Func, 0, "func(path string) bool"},
+ {"Join", Func, 0, "func(elem ...string) string"},
+ {"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"},
+ {"Split", Func, 0, "func(path string) (dir string, file string)"},
+ },
+ "path/filepath": {
+ {"Abs", Func, 0, "func(path string) (string, error)"},
+ {"Base", Func, 0, "func(path string) string"},
+ {"Clean", Func, 0, "func(path string) string"},
+ {"Dir", Func, 0, "func(path string) string"},
+ {"ErrBadPattern", Var, 0, ""},
+ {"EvalSymlinks", Func, 0, "func(path string) (string, error)"},
+ {"Ext", Func, 0, "func(path string) string"},
+ {"FromSlash", Func, 0, "func(path string) string"},
+ {"Glob", Func, 0, "func(pattern string) (matches []string, err error)"},
+ {"HasPrefix", Func, 0, "func(p string, prefix string) bool"},
+ {"IsAbs", Func, 0, "func(path string) bool"},
+ {"IsLocal", Func, 20, "func(path string) bool"},
+ {"Join", Func, 0, "func(elem ...string) string"},
+ {"ListSeparator", Const, 0, ""},
+ {"Localize", Func, 23, "func(path string) (string, error)"},
+ {"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"},
+ {"Rel", Func, 0, "func(basepath string, targpath string) (string, error)"},
+ {"Separator", Const, 0, ""},
+ {"SkipAll", Var, 20, ""},
+ {"SkipDir", Var, 0, ""},
+ {"Split", Func, 0, "func(path string) (dir string, file string)"},
+ {"SplitList", Func, 0, "func(path string) []string"},
+ {"ToSlash", Func, 0, "func(path string) string"},
+ {"VolumeName", Func, 0, "func(path string) string"},
+ {"Walk", Func, 0, "func(root string, fn WalkFunc) error"},
+ {"WalkDir", Func, 16, "func(root string, fn fs.WalkDirFunc) error"},
+ {"WalkFunc", Type, 0, ""},
+ },
+ "plugin": {
+ {"(*Plugin).Lookup", Method, 8, ""},
+ {"Open", Func, 8, "func(path string) (*Plugin, error)"},
+ {"Plugin", Type, 8, ""},
+ {"Symbol", Type, 8, ""},
+ },
+ "reflect": {
+ {"(*MapIter).Key", Method, 12, ""},
+ {"(*MapIter).Next", Method, 12, ""},
+ {"(*MapIter).Reset", Method, 18, ""},
+ {"(*MapIter).Value", Method, 12, ""},
+ {"(*ValueError).Error", Method, 0, ""},
+ {"(ChanDir).String", Method, 0, ""},
+ {"(Kind).String", Method, 0, ""},
+ {"(Method).IsExported", Method, 17, ""},
+ {"(StructField).IsExported", Method, 17, ""},
+ {"(StructTag).Get", Method, 0, ""},
+ {"(StructTag).Lookup", Method, 7, ""},
+ {"(Value).Addr", Method, 0, ""},
+ {"(Value).Bool", Method, 0, ""},
+ {"(Value).Bytes", Method, 0, ""},
+ {"(Value).Call", Method, 0, ""},
+ {"(Value).CallSlice", Method, 0, ""},
+ {"(Value).CanAddr", Method, 0, ""},
+ {"(Value).CanComplex", Method, 18, ""},
+ {"(Value).CanConvert", Method, 17, ""},
+ {"(Value).CanFloat", Method, 18, ""},
+ {"(Value).CanInt", Method, 18, ""},
+ {"(Value).CanInterface", Method, 0, ""},
+ {"(Value).CanSet", Method, 0, ""},
+ {"(Value).CanUint", Method, 18, ""},
+ {"(Value).Cap", Method, 0, ""},
+ {"(Value).Clear", Method, 21, ""},
+ {"(Value).Close", Method, 0, ""},
+ {"(Value).Comparable", Method, 20, ""},
+ {"(Value).Complex", Method, 0, ""},
+ {"(Value).Convert", Method, 1, ""},
+ {"(Value).Elem", Method, 0, ""},
+ {"(Value).Equal", Method, 20, ""},
+ {"(Value).Field", Method, 0, ""},
+ {"(Value).FieldByIndex", Method, 0, ""},
+ {"(Value).FieldByIndexErr", Method, 18, ""},
+ {"(Value).FieldByName", Method, 0, ""},
+ {"(Value).FieldByNameFunc", Method, 0, ""},
+ {"(Value).Float", Method, 0, ""},
+ {"(Value).Grow", Method, 20, ""},
+ {"(Value).Index", Method, 0, ""},
+ {"(Value).Int", Method, 0, ""},
+ {"(Value).Interface", Method, 0, ""},
+ {"(Value).InterfaceData", Method, 0, ""},
+ {"(Value).IsNil", Method, 0, ""},
+ {"(Value).IsValid", Method, 0, ""},
+ {"(Value).IsZero", Method, 13, ""},
+ {"(Value).Kind", Method, 0, ""},
+ {"(Value).Len", Method, 0, ""},
+ {"(Value).MapIndex", Method, 0, ""},
+ {"(Value).MapKeys", Method, 0, ""},
+ {"(Value).MapRange", Method, 12, ""},
+ {"(Value).Method", Method, 0, ""},
+ {"(Value).MethodByName", Method, 0, ""},
+ {"(Value).NumField", Method, 0, ""},
+ {"(Value).NumMethod", Method, 0, ""},
+ {"(Value).OverflowComplex", Method, 0, ""},
+ {"(Value).OverflowFloat", Method, 0, ""},
+ {"(Value).OverflowInt", Method, 0, ""},
+ {"(Value).OverflowUint", Method, 0, ""},
+ {"(Value).Pointer", Method, 0, ""},
+ {"(Value).Recv", Method, 0, ""},
+ {"(Value).Send", Method, 0, ""},
+ {"(Value).Seq", Method, 23, ""},
+ {"(Value).Seq2", Method, 23, ""},
+ {"(Value).Set", Method, 0, ""},
+ {"(Value).SetBool", Method, 0, ""},
+ {"(Value).SetBytes", Method, 0, ""},
+ {"(Value).SetCap", Method, 2, ""},
+ {"(Value).SetComplex", Method, 0, ""},
+ {"(Value).SetFloat", Method, 0, ""},
+ {"(Value).SetInt", Method, 0, ""},
+ {"(Value).SetIterKey", Method, 18, ""},
+ {"(Value).SetIterValue", Method, 18, ""},
+ {"(Value).SetLen", Method, 0, ""},
+ {"(Value).SetMapIndex", Method, 0, ""},
+ {"(Value).SetPointer", Method, 0, ""},
+ {"(Value).SetString", Method, 0, ""},
+ {"(Value).SetUint", Method, 0, ""},
+ {"(Value).SetZero", Method, 20, ""},
+ {"(Value).Slice", Method, 0, ""},
+ {"(Value).Slice3", Method, 2, ""},
+ {"(Value).String", Method, 0, ""},
+ {"(Value).TryRecv", Method, 0, ""},
+ {"(Value).TrySend", Method, 0, ""},
+ {"(Value).Type", Method, 0, ""},
+ {"(Value).Uint", Method, 0, ""},
+ {"(Value).UnsafeAddr", Method, 0, ""},
+ {"(Value).UnsafePointer", Method, 18, ""},
+ {"Append", Func, 0, "func(s Value, x ...Value) Value"},
+ {"AppendSlice", Func, 0, "func(s Value, t Value) Value"},
+ {"Array", Const, 0, ""},
+ {"ArrayOf", Func, 5, "func(length int, elem Type) Type"},
+ {"Bool", Const, 0, ""},
+ {"BothDir", Const, 0, ""},
+ {"Chan", Const, 0, ""},
+ {"ChanDir", Type, 0, ""},
+ {"ChanOf", Func, 1, "func(dir ChanDir, t Type) Type"},
+ {"Complex128", Const, 0, ""},
+ {"Complex64", Const, 0, ""},
+ {"Copy", Func, 0, "func(dst Value, src Value) int"},
+ {"DeepEqual", Func, 0, "func(x any, y any) bool"},
+ {"Float32", Const, 0, ""},
+ {"Float64", Const, 0, ""},
+ {"Func", Const, 0, ""},
+ {"FuncOf", Func, 5, "func(in []Type, out []Type, variadic bool) Type"},
+ {"Indirect", Func, 0, "func(v Value) Value"},
+ {"Int", Const, 0, ""},
+ {"Int16", Const, 0, ""},
+ {"Int32", Const, 0, ""},
+ {"Int64", Const, 0, ""},
+ {"Int8", Const, 0, ""},
+ {"Interface", Const, 0, ""},
+ {"Invalid", Const, 0, ""},
+ {"Kind", Type, 0, ""},
+ {"MakeChan", Func, 0, "func(typ Type, buffer int) Value"},
+ {"MakeFunc", Func, 1, "func(typ Type, fn func(args []Value) (results []Value)) Value"},
+ {"MakeMap", Func, 0, "func(typ Type) Value"},
+ {"MakeMapWithSize", Func, 9, "func(typ Type, n int) Value"},
+ {"MakeSlice", Func, 0, "func(typ Type, len int, cap int) Value"},
+ {"Map", Const, 0, ""},
+ {"MapIter", Type, 12, ""},
+ {"MapOf", Func, 1, "func(key Type, elem Type) Type"},
+ {"Method", Type, 0, ""},
+ {"Method.Func", Field, 0, ""},
+ {"Method.Index", Field, 0, ""},
+ {"Method.Name", Field, 0, ""},
+ {"Method.PkgPath", Field, 0, ""},
+ {"Method.Type", Field, 0, ""},
+ {"New", Func, 0, "func(typ Type) Value"},
+ {"NewAt", Func, 0, "func(typ Type, p unsafe.Pointer) Value"},
+ {"Pointer", Const, 18, ""},
+ {"PointerTo", Func, 18, "func(t Type) Type"},
+ {"Ptr", Const, 0, ""},
+ {"PtrTo", Func, 0, "func(t Type) Type"},
+ {"RecvDir", Const, 0, ""},
+ {"Select", Func, 1, "func(cases []SelectCase) (chosen int, recv Value, recvOK bool)"},
+ {"SelectCase", Type, 1, ""},
+ {"SelectCase.Chan", Field, 1, ""},
+ {"SelectCase.Dir", Field, 1, ""},
+ {"SelectCase.Send", Field, 1, ""},
+ {"SelectDefault", Const, 1, ""},
+ {"SelectDir", Type, 1, ""},
+ {"SelectRecv", Const, 1, ""},
+ {"SelectSend", Const, 1, ""},
+ {"SendDir", Const, 0, ""},
+ {"Slice", Const, 0, ""},
+ {"SliceAt", Func, 23, "func(typ Type, p unsafe.Pointer, n int) Value"},
+ {"SliceHeader", Type, 0, ""},
+ {"SliceHeader.Cap", Field, 0, ""},
+ {"SliceHeader.Data", Field, 0, ""},
+ {"SliceHeader.Len", Field, 0, ""},
+ {"SliceOf", Func, 1, "func(t Type) Type"},
+ {"String", Const, 0, ""},
+ {"StringHeader", Type, 0, ""},
+ {"StringHeader.Data", Field, 0, ""},
+ {"StringHeader.Len", Field, 0, ""},
+ {"Struct", Const, 0, ""},
+ {"StructField", Type, 0, ""},
+ {"StructField.Anonymous", Field, 0, ""},
+ {"StructField.Index", Field, 0, ""},
+ {"StructField.Name", Field, 0, ""},
+ {"StructField.Offset", Field, 0, ""},
+ {"StructField.PkgPath", Field, 0, ""},
+ {"StructField.Tag", Field, 0, ""},
+ {"StructField.Type", Field, 0, ""},
+ {"StructOf", Func, 7, "func(fields []StructField) Type"},
+ {"StructTag", Type, 0, ""},
+ {"Swapper", Func, 8, "func(slice any) func(i int, j int)"},
+ {"Type", Type, 0, ""},
+ {"TypeAssert", Func, 25, "func[T any](v Value) (T, bool)"},
+ {"TypeFor", Func, 22, "func[T any]() Type"},
+ {"TypeOf", Func, 0, "func(i any) Type"},
+ {"Uint", Const, 0, ""},
+ {"Uint16", Const, 0, ""},
+ {"Uint32", Const, 0, ""},
+ {"Uint64", Const, 0, ""},
+ {"Uint8", Const, 0, ""},
+ {"Uintptr", Const, 0, ""},
+ {"UnsafePointer", Const, 0, ""},
+ {"Value", Type, 0, ""},
+ {"ValueError", Type, 0, ""},
+ {"ValueError.Kind", Field, 0, ""},
+ {"ValueError.Method", Field, 0, ""},
+ {"ValueOf", Func, 0, "func(i any) Value"},
+ {"VisibleFields", Func, 17, "func(t Type) []StructField"},
+ {"Zero", Func, 0, "func(typ Type) Value"},
+ },
+ "regexp": {
+ {"(*Regexp).AppendText", Method, 24, ""},
+ {"(*Regexp).Copy", Method, 6, ""},
+ {"(*Regexp).Expand", Method, 0, ""},
+ {"(*Regexp).ExpandString", Method, 0, ""},
+ {"(*Regexp).Find", Method, 0, ""},
+ {"(*Regexp).FindAll", Method, 0, ""},
+ {"(*Regexp).FindAllIndex", Method, 0, ""},
+ {"(*Regexp).FindAllString", Method, 0, ""},
+ {"(*Regexp).FindAllStringIndex", Method, 0, ""},
+ {"(*Regexp).FindAllStringSubmatch", Method, 0, ""},
+ {"(*Regexp).FindAllStringSubmatchIndex", Method, 0, ""},
+ {"(*Regexp).FindAllSubmatch", Method, 0, ""},
+ {"(*Regexp).FindAllSubmatchIndex", Method, 0, ""},
+ {"(*Regexp).FindIndex", Method, 0, ""},
+ {"(*Regexp).FindReaderIndex", Method, 0, ""},
+ {"(*Regexp).FindReaderSubmatchIndex", Method, 0, ""},
+ {"(*Regexp).FindString", Method, 0, ""},
+ {"(*Regexp).FindStringIndex", Method, 0, ""},
+ {"(*Regexp).FindStringSubmatch", Method, 0, ""},
+ {"(*Regexp).FindStringSubmatchIndex", Method, 0, ""},
+ {"(*Regexp).FindSubmatch", Method, 0, ""},
+ {"(*Regexp).FindSubmatchIndex", Method, 0, ""},
+ {"(*Regexp).LiteralPrefix", Method, 0, ""},
+ {"(*Regexp).Longest", Method, 1, ""},
+ {"(*Regexp).MarshalText", Method, 21, ""},
+ {"(*Regexp).Match", Method, 0, ""},
+ {"(*Regexp).MatchReader", Method, 0, ""},
+ {"(*Regexp).MatchString", Method, 0, ""},
+ {"(*Regexp).NumSubexp", Method, 0, ""},
+ {"(*Regexp).ReplaceAll", Method, 0, ""},
+ {"(*Regexp).ReplaceAllFunc", Method, 0, ""},
+ {"(*Regexp).ReplaceAllLiteral", Method, 0, ""},
+ {"(*Regexp).ReplaceAllLiteralString", Method, 0, ""},
+ {"(*Regexp).ReplaceAllString", Method, 0, ""},
+ {"(*Regexp).ReplaceAllStringFunc", Method, 0, ""},
+ {"(*Regexp).Split", Method, 1, ""},
+ {"(*Regexp).String", Method, 0, ""},
+ {"(*Regexp).SubexpIndex", Method, 15, ""},
+ {"(*Regexp).SubexpNames", Method, 0, ""},
+ {"(*Regexp).UnmarshalText", Method, 21, ""},
+ {"Compile", Func, 0, "func(expr string) (*Regexp, error)"},
+ {"CompilePOSIX", Func, 0, "func(expr string) (*Regexp, error)"},
+ {"Match", Func, 0, "func(pattern string, b []byte) (matched bool, err error)"},
+ {"MatchReader", Func, 0, "func(pattern string, r io.RuneReader) (matched bool, err error)"},
+ {"MatchString", Func, 0, "func(pattern string, s string) (matched bool, err error)"},
+ {"MustCompile", Func, 0, "func(str string) *Regexp"},
+ {"MustCompilePOSIX", Func, 0, "func(str string) *Regexp"},
+ {"QuoteMeta", Func, 0, "func(s string) string"},
+ {"Regexp", Type, 0, ""},
+ },
+ "regexp/syntax": {
+ {"(*Error).Error", Method, 0, ""},
+ {"(*Inst).MatchEmptyWidth", Method, 0, ""},
+ {"(*Inst).MatchRune", Method, 0, ""},
+ {"(*Inst).MatchRunePos", Method, 3, ""},
+ {"(*Inst).String", Method, 0, ""},
+ {"(*Prog).Prefix", Method, 0, ""},
+ {"(*Prog).StartCond", Method, 0, ""},
+ {"(*Prog).String", Method, 0, ""},
+ {"(*Regexp).CapNames", Method, 0, ""},
+ {"(*Regexp).Equal", Method, 0, ""},
+ {"(*Regexp).MaxCap", Method, 0, ""},
+ {"(*Regexp).Simplify", Method, 0, ""},
+ {"(*Regexp).String", Method, 0, ""},
+ {"(ErrorCode).String", Method, 0, ""},
+ {"(InstOp).String", Method, 3, ""},
+ {"(Op).String", Method, 11, ""},
+ {"ClassNL", Const, 0, ""},
+ {"Compile", Func, 0, "func(re *Regexp) (*Prog, error)"},
+ {"DotNL", Const, 0, ""},
+ {"EmptyBeginLine", Const, 0, ""},
+ {"EmptyBeginText", Const, 0, ""},
+ {"EmptyEndLine", Const, 0, ""},
+ {"EmptyEndText", Const, 0, ""},
+ {"EmptyNoWordBoundary", Const, 0, ""},
+ {"EmptyOp", Type, 0, ""},
+ {"EmptyOpContext", Func, 0, "func(r1 rune, r2 rune) EmptyOp"},
+ {"EmptyWordBoundary", Const, 0, ""},
+ {"ErrInternalError", Const, 0, ""},
+ {"ErrInvalidCharClass", Const, 0, ""},
+ {"ErrInvalidCharRange", Const, 0, ""},
+ {"ErrInvalidEscape", Const, 0, ""},
+ {"ErrInvalidNamedCapture", Const, 0, ""},
+ {"ErrInvalidPerlOp", Const, 0, ""},
+ {"ErrInvalidRepeatOp", Const, 0, ""},
+ {"ErrInvalidRepeatSize", Const, 0, ""},
+ {"ErrInvalidUTF8", Const, 0, ""},
+ {"ErrLarge", Const, 20, ""},
+ {"ErrMissingBracket", Const, 0, ""},
+ {"ErrMissingParen", Const, 0, ""},
+ {"ErrMissingRepeatArgument", Const, 0, ""},
+ {"ErrNestingDepth", Const, 19, ""},
+ {"ErrTrailingBackslash", Const, 0, ""},
+ {"ErrUnexpectedParen", Const, 1, ""},
+ {"Error", Type, 0, ""},
+ {"Error.Code", Field, 0, ""},
+ {"Error.Expr", Field, 0, ""},
+ {"ErrorCode", Type, 0, ""},
+ {"Flags", Type, 0, ""},
+ {"FoldCase", Const, 0, ""},
+ {"Inst", Type, 0, ""},
+ {"Inst.Arg", Field, 0, ""},
+ {"Inst.Op", Field, 0, ""},
+ {"Inst.Out", Field, 0, ""},
+ {"Inst.Rune", Field, 0, ""},
+ {"InstAlt", Const, 0, ""},
+ {"InstAltMatch", Const, 0, ""},
+ {"InstCapture", Const, 0, ""},
+ {"InstEmptyWidth", Const, 0, ""},
+ {"InstFail", Const, 0, ""},
+ {"InstMatch", Const, 0, ""},
+ {"InstNop", Const, 0, ""},
+ {"InstOp", Type, 0, ""},
+ {"InstRune", Const, 0, ""},
+ {"InstRune1", Const, 0, ""},
+ {"InstRuneAny", Const, 0, ""},
+ {"InstRuneAnyNotNL", Const, 0, ""},
+ {"IsWordChar", Func, 0, "func(r rune) bool"},
+ {"Literal", Const, 0, ""},
+ {"MatchNL", Const, 0, ""},
+ {"NonGreedy", Const, 0, ""},
+ {"OneLine", Const, 0, ""},
+ {"Op", Type, 0, ""},
+ {"OpAlternate", Const, 0, ""},
+ {"OpAnyChar", Const, 0, ""},
+ {"OpAnyCharNotNL", Const, 0, ""},
+ {"OpBeginLine", Const, 0, ""},
+ {"OpBeginText", Const, 0, ""},
+ {"OpCapture", Const, 0, ""},
+ {"OpCharClass", Const, 0, ""},
+ {"OpConcat", Const, 0, ""},
+ {"OpEmptyMatch", Const, 0, ""},
+ {"OpEndLine", Const, 0, ""},
+ {"OpEndText", Const, 0, ""},
+ {"OpLiteral", Const, 0, ""},
+ {"OpNoMatch", Const, 0, ""},
+ {"OpNoWordBoundary", Const, 0, ""},
+ {"OpPlus", Const, 0, ""},
+ {"OpQuest", Const, 0, ""},
+ {"OpRepeat", Const, 0, ""},
+ {"OpStar", Const, 0, ""},
+ {"OpWordBoundary", Const, 0, ""},
+ {"POSIX", Const, 0, ""},
+ {"Parse", Func, 0, "func(s string, flags Flags) (*Regexp, error)"},
+ {"Perl", Const, 0, ""},
+ {"PerlX", Const, 0, ""},
+ {"Prog", Type, 0, ""},
+ {"Prog.Inst", Field, 0, ""},
+ {"Prog.NumCap", Field, 0, ""},
+ {"Prog.Start", Field, 0, ""},
+ {"Regexp", Type, 0, ""},
+ {"Regexp.Cap", Field, 0, ""},
+ {"Regexp.Flags", Field, 0, ""},
+ {"Regexp.Max", Field, 0, ""},
+ {"Regexp.Min", Field, 0, ""},
+ {"Regexp.Name", Field, 0, ""},
+ {"Regexp.Op", Field, 0, ""},
+ {"Regexp.Rune", Field, 0, ""},
+ {"Regexp.Rune0", Field, 0, ""},
+ {"Regexp.Sub", Field, 0, ""},
+ {"Regexp.Sub0", Field, 0, ""},
+ {"Simple", Const, 0, ""},
+ {"UnicodeGroups", Const, 0, ""},
+ {"WasDollar", Const, 0, ""},
+ },
+ "runtime": {
+ {"(*BlockProfileRecord).Stack", Method, 1, ""},
+ {"(*Frames).Next", Method, 7, ""},
+ {"(*Func).Entry", Method, 0, ""},
+ {"(*Func).FileLine", Method, 0, ""},
+ {"(*Func).Name", Method, 0, ""},
+ {"(*MemProfileRecord).InUseBytes", Method, 0, ""},
+ {"(*MemProfileRecord).InUseObjects", Method, 0, ""},
+ {"(*MemProfileRecord).Stack", Method, 0, ""},
+ {"(*PanicNilError).Error", Method, 21, ""},
+ {"(*PanicNilError).RuntimeError", Method, 21, ""},
+ {"(*Pinner).Pin", Method, 21, ""},
+ {"(*Pinner).Unpin", Method, 21, ""},
+ {"(*StackRecord).Stack", Method, 0, ""},
+ {"(*TypeAssertionError).Error", Method, 0, ""},
+ {"(*TypeAssertionError).RuntimeError", Method, 0, ""},
+ {"(Cleanup).Stop", Method, 24, ""},
+ {"AddCleanup", Func, 24, "func[T, S any](ptr *T, cleanup func(S), arg S) Cleanup"},
+ {"BlockProfile", Func, 1, "func(p []BlockProfileRecord) (n int, ok bool)"},
+ {"BlockProfileRecord", Type, 1, ""},
+ {"BlockProfileRecord.Count", Field, 1, ""},
+ {"BlockProfileRecord.Cycles", Field, 1, ""},
+ {"BlockProfileRecord.StackRecord", Field, 1, ""},
+ {"Breakpoint", Func, 0, "func()"},
+ {"CPUProfile", Func, 0, "func() []byte"},
+ {"Caller", Func, 0, "func(skip int) (pc uintptr, file string, line int, ok bool)"},
+ {"Callers", Func, 0, "func(skip int, pc []uintptr) int"},
+ {"CallersFrames", Func, 7, "func(callers []uintptr) *Frames"},
+ {"Cleanup", Type, 24, ""},
+ {"Compiler", Const, 0, ""},
+ {"Error", Type, 0, ""},
+ {"Frame", Type, 7, ""},
+ {"Frame.Entry", Field, 7, ""},
+ {"Frame.File", Field, 7, ""},
+ {"Frame.Func", Field, 7, ""},
+ {"Frame.Function", Field, 7, ""},
+ {"Frame.Line", Field, 7, ""},
+ {"Frame.PC", Field, 7, ""},
+ {"Frames", Type, 7, ""},
+ {"Func", Type, 0, ""},
+ {"FuncForPC", Func, 0, "func(pc uintptr) *Func"},
+ {"GC", Func, 0, "func()"},
+ {"GOARCH", Const, 0, ""},
+ {"GOMAXPROCS", Func, 0, "func(n int) int"},
+ {"GOOS", Const, 0, ""},
+ {"GOROOT", Func, 0, "func() string"},
+ {"Goexit", Func, 0, "func()"},
+ {"GoroutineProfile", Func, 0, "func(p []StackRecord) (n int, ok bool)"},
+ {"Gosched", Func, 0, "func()"},
+ {"KeepAlive", Func, 7, "func(x any)"},
+ {"LockOSThread", Func, 0, "func()"},
+ {"MemProfile", Func, 0, "func(p []MemProfileRecord, inuseZero bool) (n int, ok bool)"},
+ {"MemProfileRate", Var, 0, ""},
+ {"MemProfileRecord", Type, 0, ""},
+ {"MemProfileRecord.AllocBytes", Field, 0, ""},
+ {"MemProfileRecord.AllocObjects", Field, 0, ""},
+ {"MemProfileRecord.FreeBytes", Field, 0, ""},
+ {"MemProfileRecord.FreeObjects", Field, 0, ""},
+ {"MemProfileRecord.Stack0", Field, 0, ""},
+ {"MemStats", Type, 0, ""},
+ {"MemStats.Alloc", Field, 0, ""},
+ {"MemStats.BuckHashSys", Field, 0, ""},
+ {"MemStats.BySize", Field, 0, ""},
+ {"MemStats.DebugGC", Field, 0, ""},
+ {"MemStats.EnableGC", Field, 0, ""},
+ {"MemStats.Frees", Field, 0, ""},
+ {"MemStats.GCCPUFraction", Field, 5, ""},
+ {"MemStats.GCSys", Field, 2, ""},
+ {"MemStats.HeapAlloc", Field, 0, ""},
+ {"MemStats.HeapIdle", Field, 0, ""},
+ {"MemStats.HeapInuse", Field, 0, ""},
+ {"MemStats.HeapObjects", Field, 0, ""},
+ {"MemStats.HeapReleased", Field, 0, ""},
+ {"MemStats.HeapSys", Field, 0, ""},
+ {"MemStats.LastGC", Field, 0, ""},
+ {"MemStats.Lookups", Field, 0, ""},
+ {"MemStats.MCacheInuse", Field, 0, ""},
+ {"MemStats.MCacheSys", Field, 0, ""},
+ {"MemStats.MSpanInuse", Field, 0, ""},
+ {"MemStats.MSpanSys", Field, 0, ""},
+ {"MemStats.Mallocs", Field, 0, ""},
+ {"MemStats.NextGC", Field, 0, ""},
+ {"MemStats.NumForcedGC", Field, 8, ""},
+ {"MemStats.NumGC", Field, 0, ""},
+ {"MemStats.OtherSys", Field, 2, ""},
+ {"MemStats.PauseEnd", Field, 4, ""},
+ {"MemStats.PauseNs", Field, 0, ""},
+ {"MemStats.PauseTotalNs", Field, 0, ""},
+ {"MemStats.StackInuse", Field, 0, ""},
+ {"MemStats.StackSys", Field, 0, ""},
+ {"MemStats.Sys", Field, 0, ""},
+ {"MemStats.TotalAlloc", Field, 0, ""},
+ {"MutexProfile", Func, 8, "func(p []BlockProfileRecord) (n int, ok bool)"},
+ {"NumCPU", Func, 0, "func() int"},
+ {"NumCgoCall", Func, 0, "func() int64"},
+ {"NumGoroutine", Func, 0, "func() int"},
+ {"PanicNilError", Type, 21, ""},
+ {"Pinner", Type, 21, ""},
+ {"ReadMemStats", Func, 0, "func(m *MemStats)"},
+ {"ReadTrace", Func, 5, "func() []byte"},
+ {"SetBlockProfileRate", Func, 1, "func(rate int)"},
+ {"SetCPUProfileRate", Func, 0, "func(hz int)"},
+ {"SetCgoTraceback", Func, 7, "func(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)"},
+ {"SetDefaultGOMAXPROCS", Func, 25, "func()"},
+ {"SetFinalizer", Func, 0, "func(obj any, finalizer any)"},
+ {"SetMutexProfileFraction", Func, 8, "func(rate int) int"},
+ {"Stack", Func, 0, "func(buf []byte, all bool) int"},
+ {"StackRecord", Type, 0, ""},
+ {"StackRecord.Stack0", Field, 0, ""},
+ {"StartTrace", Func, 5, "func() error"},
+ {"StopTrace", Func, 5, "func()"},
+ {"ThreadCreateProfile", Func, 0, "func(p []StackRecord) (n int, ok bool)"},
+ {"TypeAssertionError", Type, 0, ""},
+ {"UnlockOSThread", Func, 0, "func()"},
+ {"Version", Func, 0, "func() string"},
+ },
+ "runtime/cgo": {
+ {"(Handle).Delete", Method, 17, ""},
+ {"(Handle).Value", Method, 17, ""},
+ {"Handle", Type, 17, ""},
+ {"Incomplete", Type, 20, ""},
+ {"NewHandle", Func, 17, ""},
+ },
+ "runtime/coverage": {
+ {"ClearCounters", Func, 20, "func() error"},
+ {"WriteCounters", Func, 20, "func(w io.Writer) error"},
+ {"WriteCountersDir", Func, 20, "func(dir string) error"},
+ {"WriteMeta", Func, 20, "func(w io.Writer) error"},
+ {"WriteMetaDir", Func, 20, "func(dir string) error"},
+ },
+ "runtime/debug": {
+ {"(*BuildInfo).String", Method, 18, ""},
+ {"BuildInfo", Type, 12, ""},
+ {"BuildInfo.Deps", Field, 12, ""},
+ {"BuildInfo.GoVersion", Field, 18, ""},
+ {"BuildInfo.Main", Field, 12, ""},
+ {"BuildInfo.Path", Field, 12, ""},
+ {"BuildInfo.Settings", Field, 18, ""},
+ {"BuildSetting", Type, 18, ""},
+ {"BuildSetting.Key", Field, 18, ""},
+ {"BuildSetting.Value", Field, 18, ""},
+ {"CrashOptions", Type, 23, ""},
+ {"FreeOSMemory", Func, 1, "func()"},
+ {"GCStats", Type, 1, ""},
+ {"GCStats.LastGC", Field, 1, ""},
+ {"GCStats.NumGC", Field, 1, ""},
+ {"GCStats.Pause", Field, 1, ""},
+ {"GCStats.PauseEnd", Field, 4, ""},
+ {"GCStats.PauseQuantiles", Field, 1, ""},
+ {"GCStats.PauseTotal", Field, 1, ""},
+ {"Module", Type, 12, ""},
+ {"Module.Path", Field, 12, ""},
+ {"Module.Replace", Field, 12, ""},
+ {"Module.Sum", Field, 12, ""},
+ {"Module.Version", Field, 12, ""},
+ {"ParseBuildInfo", Func, 18, "func(data string) (bi *BuildInfo, err error)"},
+ {"PrintStack", Func, 0, "func()"},
+ {"ReadBuildInfo", Func, 12, "func() (info *BuildInfo, ok bool)"},
+ {"ReadGCStats", Func, 1, "func(stats *GCStats)"},
+ {"SetCrashOutput", Func, 23, "func(f *os.File, opts CrashOptions) error"},
+ {"SetGCPercent", Func, 1, "func(percent int) int"},
+ {"SetMaxStack", Func, 2, "func(bytes int) int"},
+ {"SetMaxThreads", Func, 2, "func(threads int) int"},
+ {"SetMemoryLimit", Func, 19, "func(limit int64) int64"},
+ {"SetPanicOnFault", Func, 3, "func(enabled bool) bool"},
+ {"SetTraceback", Func, 6, "func(level string)"},
+ {"Stack", Func, 0, "func() []byte"},
+ {"WriteHeapDump", Func, 3, "func(fd uintptr)"},
+ },
+ "runtime/metrics": {
+ {"(Value).Float64", Method, 16, ""},
+ {"(Value).Float64Histogram", Method, 16, ""},
+ {"(Value).Kind", Method, 16, ""},
+ {"(Value).Uint64", Method, 16, ""},
+ {"All", Func, 16, "func() []Description"},
+ {"Description", Type, 16, ""},
+ {"Description.Cumulative", Field, 16, ""},
+ {"Description.Description", Field, 16, ""},
+ {"Description.Kind", Field, 16, ""},
+ {"Description.Name", Field, 16, ""},
+ {"Float64Histogram", Type, 16, ""},
+ {"Float64Histogram.Buckets", Field, 16, ""},
+ {"Float64Histogram.Counts", Field, 16, ""},
+ {"KindBad", Const, 16, ""},
+ {"KindFloat64", Const, 16, ""},
+ {"KindFloat64Histogram", Const, 16, ""},
+ {"KindUint64", Const, 16, ""},
+ {"Read", Func, 16, "func(m []Sample)"},
+ {"Sample", Type, 16, ""},
+ {"Sample.Name", Field, 16, ""},
+ {"Sample.Value", Field, 16, ""},
+ {"Value", Type, 16, ""},
+ {"ValueKind", Type, 16, ""},
+ },
+ "runtime/pprof": {
+ {"(*Profile).Add", Method, 0, ""},
+ {"(*Profile).Count", Method, 0, ""},
+ {"(*Profile).Name", Method, 0, ""},
+ {"(*Profile).Remove", Method, 0, ""},
+ {"(*Profile).WriteTo", Method, 0, ""},
+ {"Do", Func, 9, "func(ctx context.Context, labels LabelSet, f func(context.Context))"},
+ {"ForLabels", Func, 9, "func(ctx context.Context, f func(key string, value string) bool)"},
+ {"Label", Func, 9, "func(ctx context.Context, key string) (string, bool)"},
+ {"LabelSet", Type, 9, ""},
+ {"Labels", Func, 9, "func(args ...string) LabelSet"},
+ {"Lookup", Func, 0, "func(name string) *Profile"},
+ {"NewProfile", Func, 0, "func(name string) *Profile"},
+ {"Profile", Type, 0, ""},
+ {"Profiles", Func, 0, "func() []*Profile"},
+ {"SetGoroutineLabels", Func, 9, "func(ctx context.Context)"},
+ {"StartCPUProfile", Func, 0, "func(w io.Writer) error"},
+ {"StopCPUProfile", Func, 0, "func()"},
+ {"WithLabels", Func, 9, "func(ctx context.Context, labels LabelSet) context.Context"},
+ {"WriteHeapProfile", Func, 0, "func(w io.Writer) error"},
+ },
+ "runtime/trace": {
+ {"(*FlightRecorder).Enabled", Method, 25, ""},
+ {"(*FlightRecorder).Start", Method, 25, ""},
+ {"(*FlightRecorder).Stop", Method, 25, ""},
+ {"(*FlightRecorder).WriteTo", Method, 25, ""},
+ {"(*Region).End", Method, 11, ""},
+ {"(*Task).End", Method, 11, ""},
+ {"FlightRecorder", Type, 25, ""},
+ {"FlightRecorderConfig", Type, 25, ""},
+ {"FlightRecorderConfig.MaxBytes", Field, 25, ""},
+ {"FlightRecorderConfig.MinAge", Field, 25, ""},
+ {"IsEnabled", Func, 11, "func() bool"},
+ {"Log", Func, 11, "func(ctx context.Context, category string, message string)"},
+ {"Logf", Func, 11, "func(ctx context.Context, category string, format string, args ...any)"},
+ {"NewFlightRecorder", Func, 25, "func(cfg FlightRecorderConfig) *FlightRecorder"},
+ {"NewTask", Func, 11, "func(pctx context.Context, taskType string) (ctx context.Context, task *Task)"},
+ {"Region", Type, 11, ""},
+ {"Start", Func, 5, "func(w io.Writer) error"},
+ {"StartRegion", Func, 11, "func(ctx context.Context, regionType string) *Region"},
+ {"Stop", Func, 5, "func()"},
+ {"Task", Type, 11, ""},
+ {"WithRegion", Func, 11, "func(ctx context.Context, regionType string, fn func())"},
+ },
+ "slices": {
+ {"All", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq2[int, E]"},
+ {"AppendSeq", Func, 23, "func[Slice ~[]E, E any](s Slice, seq iter.Seq[E]) Slice"},
+ {"Backward", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq2[int, E]"},
+ {"BinarySearch", Func, 21, "func[S ~[]E, E cmp.Ordered](x S, target E) (int, bool)"},
+ {"BinarySearchFunc", Func, 21, "func[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool)"},
+ {"Chunk", Func, 23, "func[Slice ~[]E, E any](s Slice, n int) iter.Seq[Slice]"},
+ {"Clip", Func, 21, "func[S ~[]E, E any](s S) S"},
+ {"Clone", Func, 21, "func[S ~[]E, E any](s S) S"},
+ {"Collect", Func, 23, "func[E any](seq iter.Seq[E]) []E"},
+ {"Compact", Func, 21, "func[S ~[]E, E comparable](s S) S"},
+ {"CompactFunc", Func, 21, "func[S ~[]E, E any](s S, eq func(E, E) bool) S"},
+ {"Compare", Func, 21, "func[S ~[]E, E cmp.Ordered](s1 S, s2 S) int"},
+ {"CompareFunc", Func, 21, "func[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int"},
+ {"Concat", Func, 22, "func[S ~[]E, E any](slices ...S) S"},
+ {"Contains", Func, 21, "func[S ~[]E, E comparable](s S, v E) bool"},
+ {"ContainsFunc", Func, 21, "func[S ~[]E, E any](s S, f func(E) bool) bool"},
+ {"Delete", Func, 21, "func[S ~[]E, E any](s S, i int, j int) S"},
+ {"DeleteFunc", Func, 21, "func[S ~[]E, E any](s S, del func(E) bool) S"},
+ {"Equal", Func, 21, "func[S ~[]E, E comparable](s1 S, s2 S) bool"},
+ {"EqualFunc", Func, 21, "func[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool"},
+ {"Grow", Func, 21, "func[S ~[]E, E any](s S, n int) S"},
+ {"Index", Func, 21, "func[S ~[]E, E comparable](s S, v E) int"},
+ {"IndexFunc", Func, 21, "func[S ~[]E, E any](s S, f func(E) bool) int"},
+ {"Insert", Func, 21, "func[S ~[]E, E any](s S, i int, v ...E) S"},
+ {"IsSorted", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) bool"},
+ {"IsSortedFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) bool"},
+ {"Max", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) E"},
+ {"MaxFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) E"},
+ {"Min", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) E"},
+ {"MinFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) E"},
+ {"Repeat", Func, 23, "func[S ~[]E, E any](x S, count int) S"},
+ {"Replace", Func, 21, "func[S ~[]E, E any](s S, i int, j int, v ...E) S"},
+ {"Reverse", Func, 21, "func[S ~[]E, E any](s S)"},
+ {"Sort", Func, 21, "func[S ~[]E, E cmp.Ordered](x S)"},
+ {"SortFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int)"},
+ {"SortStableFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int)"},
+ {"Sorted", Func, 23, "func[E cmp.Ordered](seq iter.Seq[E]) []E"},
+ {"SortedFunc", Func, 23, "func[E any](seq iter.Seq[E], cmp func(E, E) int) []E"},
+ {"SortedStableFunc", Func, 23, "func[E any](seq iter.Seq[E], cmp func(E, E) int) []E"},
+ {"Values", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq[E]"},
+ },
+ "sort": {
+ {"(Float64Slice).Len", Method, 0, ""},
+ {"(Float64Slice).Less", Method, 0, ""},
+ {"(Float64Slice).Search", Method, 0, ""},
+ {"(Float64Slice).Sort", Method, 0, ""},
+ {"(Float64Slice).Swap", Method, 0, ""},
+ {"(IntSlice).Len", Method, 0, ""},
+ {"(IntSlice).Less", Method, 0, ""},
+ {"(IntSlice).Search", Method, 0, ""},
+ {"(IntSlice).Sort", Method, 0, ""},
+ {"(IntSlice).Swap", Method, 0, ""},
+ {"(StringSlice).Len", Method, 0, ""},
+ {"(StringSlice).Less", Method, 0, ""},
+ {"(StringSlice).Search", Method, 0, ""},
+ {"(StringSlice).Sort", Method, 0, ""},
+ {"(StringSlice).Swap", Method, 0, ""},
+ {"Find", Func, 19, "func(n int, cmp func(int) int) (i int, found bool)"},
+ {"Float64Slice", Type, 0, ""},
+ {"Float64s", Func, 0, "func(x []float64)"},
+ {"Float64sAreSorted", Func, 0, "func(x []float64) bool"},
+ {"IntSlice", Type, 0, ""},
+ {"Interface", Type, 0, ""},
+ {"Ints", Func, 0, "func(x []int)"},
+ {"IntsAreSorted", Func, 0, "func(x []int) bool"},
+ {"IsSorted", Func, 0, "func(data Interface) bool"},
+ {"Reverse", Func, 1, "func(data Interface) Interface"},
+ {"Search", Func, 0, "func(n int, f func(int) bool) int"},
+ {"SearchFloat64s", Func, 0, "func(a []float64, x float64) int"},
+ {"SearchInts", Func, 0, "func(a []int, x int) int"},
+ {"SearchStrings", Func, 0, "func(a []string, x string) int"},
+ {"Slice", Func, 8, "func(x any, less func(i int, j int) bool)"},
+ {"SliceIsSorted", Func, 8, "func(x any, less func(i int, j int) bool) bool"},
+ {"SliceStable", Func, 8, "func(x any, less func(i int, j int) bool)"},
+ {"Sort", Func, 0, "func(data Interface)"},
+ {"Stable", Func, 2, "func(data Interface)"},
+ {"StringSlice", Type, 0, ""},
+ {"Strings", Func, 0, "func(x []string)"},
+ {"StringsAreSorted", Func, 0, "func(x []string) bool"},
+ },
+ "strconv": {
+ {"(*NumError).Error", Method, 0, ""},
+ {"(*NumError).Unwrap", Method, 14, ""},
+ {"AppendBool", Func, 0, "func(dst []byte, b bool) []byte"},
+ {"AppendFloat", Func, 0, "func(dst []byte, f float64, fmt byte, prec int, bitSize int) []byte"},
+ {"AppendInt", Func, 0, "func(dst []byte, i int64, base int) []byte"},
+ {"AppendQuote", Func, 0, "func(dst []byte, s string) []byte"},
+ {"AppendQuoteRune", Func, 0, "func(dst []byte, r rune) []byte"},
+ {"AppendQuoteRuneToASCII", Func, 0, "func(dst []byte, r rune) []byte"},
+ {"AppendQuoteRuneToGraphic", Func, 6, "func(dst []byte, r rune) []byte"},
+ {"AppendQuoteToASCII", Func, 0, "func(dst []byte, s string) []byte"},
+ {"AppendQuoteToGraphic", Func, 6, "func(dst []byte, s string) []byte"},
+ {"AppendUint", Func, 0, "func(dst []byte, i uint64, base int) []byte"},
+ {"Atoi", Func, 0, "func(s string) (int, error)"},
+ {"CanBackquote", Func, 0, "func(s string) bool"},
+ {"ErrRange", Var, 0, ""},
+ {"ErrSyntax", Var, 0, ""},
+ {"FormatBool", Func, 0, "func(b bool) string"},
+ {"FormatComplex", Func, 15, "func(c complex128, fmt byte, prec int, bitSize int) string"},
+ {"FormatFloat", Func, 0, "func(f float64, fmt byte, prec int, bitSize int) string"},
+ {"FormatInt", Func, 0, "func(i int64, base int) string"},
+ {"FormatUint", Func, 0, "func(i uint64, base int) string"},
+ {"IntSize", Const, 0, ""},
+ {"IsGraphic", Func, 6, "func(r rune) bool"},
+ {"IsPrint", Func, 0, "func(r rune) bool"},
+ {"Itoa", Func, 0, "func(i int) string"},
+ {"NumError", Type, 0, ""},
+ {"NumError.Err", Field, 0, ""},
+ {"NumError.Func", Field, 0, ""},
+ {"NumError.Num", Field, 0, ""},
+ {"ParseBool", Func, 0, "func(str string) (bool, error)"},
+ {"ParseComplex", Func, 15, "func(s string, bitSize int) (complex128, error)"},
+ {"ParseFloat", Func, 0, "func(s string, bitSize int) (float64, error)"},
+ {"ParseInt", Func, 0, "func(s string, base int, bitSize int) (i int64, err error)"},
+ {"ParseUint", Func, 0, "func(s string, base int, bitSize int) (uint64, error)"},
+ {"Quote", Func, 0, "func(s string) string"},
+ {"QuoteRune", Func, 0, "func(r rune) string"},
+ {"QuoteRuneToASCII", Func, 0, "func(r rune) string"},
+ {"QuoteRuneToGraphic", Func, 6, "func(r rune) string"},
+ {"QuoteToASCII", Func, 0, "func(s string) string"},
+ {"QuoteToGraphic", Func, 6, "func(s string) string"},
+ {"QuotedPrefix", Func, 17, "func(s string) (string, error)"},
+ {"Unquote", Func, 0, "func(s string) (string, error)"},
+ {"UnquoteChar", Func, 0, "func(s string, quote byte) (value rune, multibyte bool, tail string, err error)"},
+ },
+ "strings": {
+ {"(*Builder).Cap", Method, 12, ""},
+ {"(*Builder).Grow", Method, 10, ""},
+ {"(*Builder).Len", Method, 10, ""},
+ {"(*Builder).Reset", Method, 10, ""},
+ {"(*Builder).String", Method, 10, ""},
+ {"(*Builder).Write", Method, 10, ""},
+ {"(*Builder).WriteByte", Method, 10, ""},
+ {"(*Builder).WriteRune", Method, 10, ""},
+ {"(*Builder).WriteString", Method, 10, ""},
+ {"(*Reader).Len", Method, 0, ""},
+ {"(*Reader).Read", Method, 0, ""},
+ {"(*Reader).ReadAt", Method, 0, ""},
+ {"(*Reader).ReadByte", Method, 0, ""},
+ {"(*Reader).ReadRune", Method, 0, ""},
+ {"(*Reader).Reset", Method, 7, ""},
+ {"(*Reader).Seek", Method, 0, ""},
+ {"(*Reader).Size", Method, 5, ""},
+ {"(*Reader).UnreadByte", Method, 0, ""},
+ {"(*Reader).UnreadRune", Method, 0, ""},
+ {"(*Reader).WriteTo", Method, 1, ""},
+ {"(*Replacer).Replace", Method, 0, ""},
+ {"(*Replacer).WriteString", Method, 0, ""},
+ {"Builder", Type, 10, ""},
+ {"Clone", Func, 18, "func(s string) string"},
+ {"Compare", Func, 5, "func(a string, b string) int"},
+ {"Contains", Func, 0, "func(s string, substr string) bool"},
+ {"ContainsAny", Func, 0, "func(s string, chars string) bool"},
+ {"ContainsFunc", Func, 21, "func(s string, f func(rune) bool) bool"},
+ {"ContainsRune", Func, 0, "func(s string, r rune) bool"},
+ {"Count", Func, 0, "func(s string, substr string) int"},
+ {"Cut", Func, 18, "func(s string, sep string) (before string, after string, found bool)"},
+ {"CutPrefix", Func, 20, "func(s string, prefix string) (after string, found bool)"},
+ {"CutSuffix", Func, 20, "func(s string, suffix string) (before string, found bool)"},
+ {"EqualFold", Func, 0, "func(s string, t string) bool"},
+ {"Fields", Func, 0, "func(s string) []string"},
+ {"FieldsFunc", Func, 0, "func(s string, f func(rune) bool) []string"},
+ {"FieldsFuncSeq", Func, 24, "func(s string, f func(rune) bool) iter.Seq[string]"},
+ {"FieldsSeq", Func, 24, "func(s string) iter.Seq[string]"},
+ {"HasPrefix", Func, 0, "func(s string, prefix string) bool"},
+ {"HasSuffix", Func, 0, "func(s string, suffix string) bool"},
+ {"Index", Func, 0, "func(s string, substr string) int"},
+ {"IndexAny", Func, 0, "func(s string, chars string) int"},
+ {"IndexByte", Func, 2, "func(s string, c byte) int"},
+ {"IndexFunc", Func, 0, "func(s string, f func(rune) bool) int"},
+ {"IndexRune", Func, 0, "func(s string, r rune) int"},
+ {"Join", Func, 0, "func(elems []string, sep string) string"},
+ {"LastIndex", Func, 0, "func(s string, substr string) int"},
+ {"LastIndexAny", Func, 0, "func(s string, chars string) int"},
+ {"LastIndexByte", Func, 5, "func(s string, c byte) int"},
+ {"LastIndexFunc", Func, 0, "func(s string, f func(rune) bool) int"},
+ {"Lines", Func, 24, "func(s string) iter.Seq[string]"},
+ {"Map", Func, 0, "func(mapping func(rune) rune, s string) string"},
+ {"NewReader", Func, 0, "func(s string) *Reader"},
+ {"NewReplacer", Func, 0, "func(oldnew ...string) *Replacer"},
+ {"Reader", Type, 0, ""},
+ {"Repeat", Func, 0, "func(s string, count int) string"},
+ {"Replace", Func, 0, "func(s string, old string, new string, n int) string"},
+ {"ReplaceAll", Func, 12, "func(s string, old string, new string) string"},
+ {"Replacer", Type, 0, ""},
+ {"Split", Func, 0, "func(s string, sep string) []string"},
+ {"SplitAfter", Func, 0, "func(s string, sep string) []string"},
+ {"SplitAfterN", Func, 0, "func(s string, sep string, n int) []string"},
+ {"SplitAfterSeq", Func, 24, "func(s string, sep string) iter.Seq[string]"},
+ {"SplitN", Func, 0, "func(s string, sep string, n int) []string"},
+ {"SplitSeq", Func, 24, "func(s string, sep string) iter.Seq[string]"},
+ {"Title", Func, 0, "func(s string) string"},
+ {"ToLower", Func, 0, "func(s string) string"},
+ {"ToLowerSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
+ {"ToTitle", Func, 0, "func(s string) string"},
+ {"ToTitleSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
+ {"ToUpper", Func, 0, "func(s string) string"},
+ {"ToUpperSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
+ {"ToValidUTF8", Func, 13, "func(s string, replacement string) string"},
+ {"Trim", Func, 0, "func(s string, cutset string) string"},
+ {"TrimFunc", Func, 0, "func(s string, f func(rune) bool) string"},
+ {"TrimLeft", Func, 0, "func(s string, cutset string) string"},
+ {"TrimLeftFunc", Func, 0, "func(s string, f func(rune) bool) string"},
+ {"TrimPrefix", Func, 1, "func(s string, prefix string) string"},
+ {"TrimRight", Func, 0, "func(s string, cutset string) string"},
+ {"TrimRightFunc", Func, 0, "func(s string, f func(rune) bool) string"},
+ {"TrimSpace", Func, 0, "func(s string) string"},
+ {"TrimSuffix", Func, 1, "func(s string, suffix string) string"},
+ },
+ "structs": {
+ {"HostLayout", Type, 23, ""},
+ },
+ "sync": {
+ {"(*Cond).Broadcast", Method, 0, ""},
+ {"(*Cond).Signal", Method, 0, ""},
+ {"(*Cond).Wait", Method, 0, ""},
+ {"(*Map).Clear", Method, 23, ""},
+ {"(*Map).CompareAndDelete", Method, 20, ""},
+ {"(*Map).CompareAndSwap", Method, 20, ""},
+ {"(*Map).Delete", Method, 9, ""},
+ {"(*Map).Load", Method, 9, ""},
+ {"(*Map).LoadAndDelete", Method, 15, ""},
+ {"(*Map).LoadOrStore", Method, 9, ""},
+ {"(*Map).Range", Method, 9, ""},
+ {"(*Map).Store", Method, 9, ""},
+ {"(*Map).Swap", Method, 20, ""},
+ {"(*Mutex).Lock", Method, 0, ""},
+ {"(*Mutex).TryLock", Method, 18, ""},
+ {"(*Mutex).Unlock", Method, 0, ""},
+ {"(*Once).Do", Method, 0, ""},
+ {"(*Pool).Get", Method, 3, ""},
+ {"(*Pool).Put", Method, 3, ""},
+ {"(*RWMutex).Lock", Method, 0, ""},
+ {"(*RWMutex).RLock", Method, 0, ""},
+ {"(*RWMutex).RLocker", Method, 0, ""},
+ {"(*RWMutex).RUnlock", Method, 0, ""},
+ {"(*RWMutex).TryLock", Method, 18, ""},
+ {"(*RWMutex).TryRLock", Method, 18, ""},
+ {"(*RWMutex).Unlock", Method, 0, ""},
+ {"(*WaitGroup).Add", Method, 0, ""},
+ {"(*WaitGroup).Done", Method, 0, ""},
+ {"(*WaitGroup).Go", Method, 25, ""},
+ {"(*WaitGroup).Wait", Method, 0, ""},
+ {"Cond", Type, 0, ""},
+ {"Cond.L", Field, 0, ""},
+ {"Locker", Type, 0, ""},
+ {"Map", Type, 9, ""},
+ {"Mutex", Type, 0, ""},
+ {"NewCond", Func, 0, "func(l Locker) *Cond"},
+ {"Once", Type, 0, ""},
+ {"OnceFunc", Func, 21, "func(f func()) func()"},
+ {"OnceValue", Func, 21, "func[T any](f func() T) func() T"},
+ {"OnceValues", Func, 21, "func[T1, T2 any](f func() (T1, T2)) func() (T1, T2)"},
+ {"Pool", Type, 3, ""},
+ {"Pool.New", Field, 3, ""},
+ {"RWMutex", Type, 0, ""},
+ {"WaitGroup", Type, 0, ""},
+ },
+ "sync/atomic": {
+ {"(*Bool).CompareAndSwap", Method, 19, ""},
+ {"(*Bool).Load", Method, 19, ""},
+ {"(*Bool).Store", Method, 19, ""},
+ {"(*Bool).Swap", Method, 19, ""},
+ {"(*Int32).Add", Method, 19, ""},
+ {"(*Int32).And", Method, 23, ""},
+ {"(*Int32).CompareAndSwap", Method, 19, ""},
+ {"(*Int32).Load", Method, 19, ""},
+ {"(*Int32).Or", Method, 23, ""},
+ {"(*Int32).Store", Method, 19, ""},
+ {"(*Int32).Swap", Method, 19, ""},
+ {"(*Int64).Add", Method, 19, ""},
+ {"(*Int64).And", Method, 23, ""},
+ {"(*Int64).CompareAndSwap", Method, 19, ""},
+ {"(*Int64).Load", Method, 19, ""},
+ {"(*Int64).Or", Method, 23, ""},
+ {"(*Int64).Store", Method, 19, ""},
+ {"(*Int64).Swap", Method, 19, ""},
+ {"(*Pointer).CompareAndSwap", Method, 19, ""},
+ {"(*Pointer).Load", Method, 19, ""},
+ {"(*Pointer).Store", Method, 19, ""},
+ {"(*Pointer).Swap", Method, 19, ""},
+ {"(*Uint32).Add", Method, 19, ""},
+ {"(*Uint32).And", Method, 23, ""},
+ {"(*Uint32).CompareAndSwap", Method, 19, ""},
+ {"(*Uint32).Load", Method, 19, ""},
+ {"(*Uint32).Or", Method, 23, ""},
+ {"(*Uint32).Store", Method, 19, ""},
+ {"(*Uint32).Swap", Method, 19, ""},
+ {"(*Uint64).Add", Method, 19, ""},
+ {"(*Uint64).And", Method, 23, ""},
+ {"(*Uint64).CompareAndSwap", Method, 19, ""},
+ {"(*Uint64).Load", Method, 19, ""},
+ {"(*Uint64).Or", Method, 23, ""},
+ {"(*Uint64).Store", Method, 19, ""},
+ {"(*Uint64).Swap", Method, 19, ""},
+ {"(*Uintptr).Add", Method, 19, ""},
+ {"(*Uintptr).And", Method, 23, ""},
+ {"(*Uintptr).CompareAndSwap", Method, 19, ""},
+ {"(*Uintptr).Load", Method, 19, ""},
+ {"(*Uintptr).Or", Method, 23, ""},
+ {"(*Uintptr).Store", Method, 19, ""},
+ {"(*Uintptr).Swap", Method, 19, ""},
+ {"(*Value).CompareAndSwap", Method, 17, ""},
+ {"(*Value).Load", Method, 4, ""},
+ {"(*Value).Store", Method, 4, ""},
+ {"(*Value).Swap", Method, 17, ""},
+ {"AddInt32", Func, 0, "func(addr *int32, delta int32) (new int32)"},
+ {"AddInt64", Func, 0, "func(addr *int64, delta int64) (new int64)"},
+ {"AddUint32", Func, 0, "func(addr *uint32, delta uint32) (new uint32)"},
+ {"AddUint64", Func, 0, "func(addr *uint64, delta uint64) (new uint64)"},
+ {"AddUintptr", Func, 0, "func(addr *uintptr, delta uintptr) (new uintptr)"},
+ {"AndInt32", Func, 23, "func(addr *int32, mask int32) (old int32)"},
+ {"AndInt64", Func, 23, "func(addr *int64, mask int64) (old int64)"},
+ {"AndUint32", Func, 23, "func(addr *uint32, mask uint32) (old uint32)"},
+ {"AndUint64", Func, 23, "func(addr *uint64, mask uint64) (old uint64)"},
+ {"AndUintptr", Func, 23, "func(addr *uintptr, mask uintptr) (old uintptr)"},
+ {"Bool", Type, 19, ""},
+ {"CompareAndSwapInt32", Func, 0, "func(addr *int32, old int32, new int32) (swapped bool)"},
+ {"CompareAndSwapInt64", Func, 0, "func(addr *int64, old int64, new int64) (swapped bool)"},
+ {"CompareAndSwapPointer", Func, 0, "func(addr *unsafe.Pointer, old unsafe.Pointer, new unsafe.Pointer) (swapped bool)"},
+ {"CompareAndSwapUint32", Func, 0, "func(addr *uint32, old uint32, new uint32) (swapped bool)"},
+ {"CompareAndSwapUint64", Func, 0, "func(addr *uint64, old uint64, new uint64) (swapped bool)"},
+ {"CompareAndSwapUintptr", Func, 0, "func(addr *uintptr, old uintptr, new uintptr) (swapped bool)"},
+ {"Int32", Type, 19, ""},
+ {"Int64", Type, 19, ""},
+ {"LoadInt32", Func, 0, "func(addr *int32) (val int32)"},
+ {"LoadInt64", Func, 0, "func(addr *int64) (val int64)"},
+ {"LoadPointer", Func, 0, "func(addr *unsafe.Pointer) (val unsafe.Pointer)"},
+ {"LoadUint32", Func, 0, "func(addr *uint32) (val uint32)"},
+ {"LoadUint64", Func, 0, "func(addr *uint64) (val uint64)"},
+ {"LoadUintptr", Func, 0, "func(addr *uintptr) (val uintptr)"},
+ {"OrInt32", Func, 23, "func(addr *int32, mask int32) (old int32)"},
+ {"OrInt64", Func, 23, "func(addr *int64, mask int64) (old int64)"},
+ {"OrUint32", Func, 23, "func(addr *uint32, mask uint32) (old uint32)"},
+ {"OrUint64", Func, 23, "func(addr *uint64, mask uint64) (old uint64)"},
+ {"OrUintptr", Func, 23, "func(addr *uintptr, mask uintptr) (old uintptr)"},
+ {"Pointer", Type, 19, ""},
+ {"StoreInt32", Func, 0, "func(addr *int32, val int32)"},
+ {"StoreInt64", Func, 0, "func(addr *int64, val int64)"},
+ {"StorePointer", Func, 0, "func(addr *unsafe.Pointer, val unsafe.Pointer)"},
+ {"StoreUint32", Func, 0, "func(addr *uint32, val uint32)"},
+ {"StoreUint64", Func, 0, "func(addr *uint64, val uint64)"},
+ {"StoreUintptr", Func, 0, "func(addr *uintptr, val uintptr)"},
+ {"SwapInt32", Func, 2, "func(addr *int32, new int32) (old int32)"},
+ {"SwapInt64", Func, 2, "func(addr *int64, new int64) (old int64)"},
+ {"SwapPointer", Func, 2, "func(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)"},
+ {"SwapUint32", Func, 2, "func(addr *uint32, new uint32) (old uint32)"},
+ {"SwapUint64", Func, 2, "func(addr *uint64, new uint64) (old uint64)"},
+ {"SwapUintptr", Func, 2, "func(addr *uintptr, new uintptr) (old uintptr)"},
+ {"Uint32", Type, 19, ""},
+ {"Uint64", Type, 19, ""},
+ {"Uintptr", Type, 19, ""},
+ {"Value", Type, 4, ""},
+ },
+ "syscall": {
+ {"(*Cmsghdr).SetLen", Method, 0, ""},
+ {"(*DLL).FindProc", Method, 0, ""},
+ {"(*DLL).MustFindProc", Method, 0, ""},
+ {"(*DLL).Release", Method, 0, ""},
+ {"(*DLLError).Error", Method, 0, ""},
+ {"(*DLLError).Unwrap", Method, 16, ""},
+ {"(*Filetime).Nanoseconds", Method, 0, ""},
+ {"(*Iovec).SetLen", Method, 0, ""},
+ {"(*LazyDLL).Handle", Method, 0, ""},
+ {"(*LazyDLL).Load", Method, 0, ""},
+ {"(*LazyDLL).NewProc", Method, 0, ""},
+ {"(*LazyProc).Addr", Method, 0, ""},
+ {"(*LazyProc).Call", Method, 0, ""},
+ {"(*LazyProc).Find", Method, 0, ""},
+ {"(*Msghdr).SetControllen", Method, 0, ""},
+ {"(*Proc).Addr", Method, 0, ""},
+ {"(*Proc).Call", Method, 0, ""},
+ {"(*PtraceRegs).PC", Method, 0, ""},
+ {"(*PtraceRegs).SetPC", Method, 0, ""},
+ {"(*RawSockaddrAny).Sockaddr", Method, 0, ""},
+ {"(*SID).Copy", Method, 0, ""},
+ {"(*SID).Len", Method, 0, ""},
+ {"(*SID).LookupAccount", Method, 0, ""},
+ {"(*SID).String", Method, 0, ""},
+ {"(*Timespec).Nano", Method, 0, ""},
+ {"(*Timespec).Unix", Method, 0, ""},
+ {"(*Timeval).Nano", Method, 0, ""},
+ {"(*Timeval).Nanoseconds", Method, 0, ""},
+ {"(*Timeval).Unix", Method, 0, ""},
+ {"(Errno).Error", Method, 0, ""},
+ {"(Errno).Is", Method, 13, ""},
+ {"(Errno).Temporary", Method, 0, ""},
+ {"(Errno).Timeout", Method, 0, ""},
+ {"(Signal).Signal", Method, 0, ""},
+ {"(Signal).String", Method, 0, ""},
+ {"(Token).Close", Method, 0, ""},
+ {"(Token).GetTokenPrimaryGroup", Method, 0, ""},
+ {"(Token).GetTokenUser", Method, 0, ""},
+ {"(Token).GetUserProfileDirectory", Method, 0, ""},
+ {"(WaitStatus).Continued", Method, 0, ""},
+ {"(WaitStatus).CoreDump", Method, 0, ""},
+ {"(WaitStatus).ExitStatus", Method, 0, ""},
+ {"(WaitStatus).Exited", Method, 0, ""},
+ {"(WaitStatus).Signal", Method, 0, ""},
+ {"(WaitStatus).Signaled", Method, 0, ""},
+ {"(WaitStatus).StopSignal", Method, 0, ""},
+ {"(WaitStatus).Stopped", Method, 0, ""},
+ {"(WaitStatus).TrapCause", Method, 0, ""},
+ {"AF_ALG", Const, 0, ""},
+ {"AF_APPLETALK", Const, 0, ""},
+ {"AF_ARP", Const, 0, ""},
+ {"AF_ASH", Const, 0, ""},
+ {"AF_ATM", Const, 0, ""},
+ {"AF_ATMPVC", Const, 0, ""},
+ {"AF_ATMSVC", Const, 0, ""},
+ {"AF_AX25", Const, 0, ""},
+ {"AF_BLUETOOTH", Const, 0, ""},
+ {"AF_BRIDGE", Const, 0, ""},
+ {"AF_CAIF", Const, 0, ""},
+ {"AF_CAN", Const, 0, ""},
+ {"AF_CCITT", Const, 0, ""},
+ {"AF_CHAOS", Const, 0, ""},
+ {"AF_CNT", Const, 0, ""},
+ {"AF_COIP", Const, 0, ""},
+ {"AF_DATAKIT", Const, 0, ""},
+ {"AF_DECnet", Const, 0, ""},
+ {"AF_DLI", Const, 0, ""},
+ {"AF_E164", Const, 0, ""},
+ {"AF_ECMA", Const, 0, ""},
+ {"AF_ECONET", Const, 0, ""},
+ {"AF_ENCAP", Const, 1, ""},
+ {"AF_FILE", Const, 0, ""},
+ {"AF_HYLINK", Const, 0, ""},
+ {"AF_IEEE80211", Const, 0, ""},
+ {"AF_IEEE802154", Const, 0, ""},
+ {"AF_IMPLINK", Const, 0, ""},
+ {"AF_INET", Const, 0, ""},
+ {"AF_INET6", Const, 0, ""},
+ {"AF_INET6_SDP", Const, 3, ""},
+ {"AF_INET_SDP", Const, 3, ""},
+ {"AF_IPX", Const, 0, ""},
+ {"AF_IRDA", Const, 0, ""},
+ {"AF_ISDN", Const, 0, ""},
+ {"AF_ISO", Const, 0, ""},
+ {"AF_IUCV", Const, 0, ""},
+ {"AF_KEY", Const, 0, ""},
+ {"AF_LAT", Const, 0, ""},
+ {"AF_LINK", Const, 0, ""},
+ {"AF_LLC", Const, 0, ""},
+ {"AF_LOCAL", Const, 0, ""},
+ {"AF_MAX", Const, 0, ""},
+ {"AF_MPLS", Const, 1, ""},
+ {"AF_NATM", Const, 0, ""},
+ {"AF_NDRV", Const, 0, ""},
+ {"AF_NETBEUI", Const, 0, ""},
+ {"AF_NETBIOS", Const, 0, ""},
+ {"AF_NETGRAPH", Const, 0, ""},
+ {"AF_NETLINK", Const, 0, ""},
+ {"AF_NETROM", Const, 0, ""},
+ {"AF_NS", Const, 0, ""},
+ {"AF_OROUTE", Const, 1, ""},
+ {"AF_OSI", Const, 0, ""},
+ {"AF_PACKET", Const, 0, ""},
+ {"AF_PHONET", Const, 0, ""},
+ {"AF_PPP", Const, 0, ""},
+ {"AF_PPPOX", Const, 0, ""},
+ {"AF_PUP", Const, 0, ""},
+ {"AF_RDS", Const, 0, ""},
+ {"AF_RESERVED_36", Const, 0, ""},
+ {"AF_ROSE", Const, 0, ""},
+ {"AF_ROUTE", Const, 0, ""},
+ {"AF_RXRPC", Const, 0, ""},
+ {"AF_SCLUSTER", Const, 0, ""},
+ {"AF_SECURITY", Const, 0, ""},
+ {"AF_SIP", Const, 0, ""},
+ {"AF_SLOW", Const, 0, ""},
+ {"AF_SNA", Const, 0, ""},
+ {"AF_SYSTEM", Const, 0, ""},
+ {"AF_TIPC", Const, 0, ""},
+ {"AF_UNIX", Const, 0, ""},
+ {"AF_UNSPEC", Const, 0, ""},
+ {"AF_UTUN", Const, 16, ""},
+ {"AF_VENDOR00", Const, 0, ""},
+ {"AF_VENDOR01", Const, 0, ""},
+ {"AF_VENDOR02", Const, 0, ""},
+ {"AF_VENDOR03", Const, 0, ""},
+ {"AF_VENDOR04", Const, 0, ""},
+ {"AF_VENDOR05", Const, 0, ""},
+ {"AF_VENDOR06", Const, 0, ""},
+ {"AF_VENDOR07", Const, 0, ""},
+ {"AF_VENDOR08", Const, 0, ""},
+ {"AF_VENDOR09", Const, 0, ""},
+ {"AF_VENDOR10", Const, 0, ""},
+ {"AF_VENDOR11", Const, 0, ""},
+ {"AF_VENDOR12", Const, 0, ""},
+ {"AF_VENDOR13", Const, 0, ""},
+ {"AF_VENDOR14", Const, 0, ""},
+ {"AF_VENDOR15", Const, 0, ""},
+ {"AF_VENDOR16", Const, 0, ""},
+ {"AF_VENDOR17", Const, 0, ""},
+ {"AF_VENDOR18", Const, 0, ""},
+ {"AF_VENDOR19", Const, 0, ""},
+ {"AF_VENDOR20", Const, 0, ""},
+ {"AF_VENDOR21", Const, 0, ""},
+ {"AF_VENDOR22", Const, 0, ""},
+ {"AF_VENDOR23", Const, 0, ""},
+ {"AF_VENDOR24", Const, 0, ""},
+ {"AF_VENDOR25", Const, 0, ""},
+ {"AF_VENDOR26", Const, 0, ""},
+ {"AF_VENDOR27", Const, 0, ""},
+ {"AF_VENDOR28", Const, 0, ""},
+ {"AF_VENDOR29", Const, 0, ""},
+ {"AF_VENDOR30", Const, 0, ""},
+ {"AF_VENDOR31", Const, 0, ""},
+ {"AF_VENDOR32", Const, 0, ""},
+ {"AF_VENDOR33", Const, 0, ""},
+ {"AF_VENDOR34", Const, 0, ""},
+ {"AF_VENDOR35", Const, 0, ""},
+ {"AF_VENDOR36", Const, 0, ""},
+ {"AF_VENDOR37", Const, 0, ""},
+ {"AF_VENDOR38", Const, 0, ""},
+ {"AF_VENDOR39", Const, 0, ""},
+ {"AF_VENDOR40", Const, 0, ""},
+ {"AF_VENDOR41", Const, 0, ""},
+ {"AF_VENDOR42", Const, 0, ""},
+ {"AF_VENDOR43", Const, 0, ""},
+ {"AF_VENDOR44", Const, 0, ""},
+ {"AF_VENDOR45", Const, 0, ""},
+ {"AF_VENDOR46", Const, 0, ""},
+ {"AF_VENDOR47", Const, 0, ""},
+ {"AF_WANPIPE", Const, 0, ""},
+ {"AF_X25", Const, 0, ""},
+ {"AI_CANONNAME", Const, 1, ""},
+ {"AI_NUMERICHOST", Const, 1, ""},
+ {"AI_PASSIVE", Const, 1, ""},
+ {"APPLICATION_ERROR", Const, 0, ""},
+ {"ARPHRD_ADAPT", Const, 0, ""},
+ {"ARPHRD_APPLETLK", Const, 0, ""},
+ {"ARPHRD_ARCNET", Const, 0, ""},
+ {"ARPHRD_ASH", Const, 0, ""},
+ {"ARPHRD_ATM", Const, 0, ""},
+ {"ARPHRD_AX25", Const, 0, ""},
+ {"ARPHRD_BIF", Const, 0, ""},
+ {"ARPHRD_CHAOS", Const, 0, ""},
+ {"ARPHRD_CISCO", Const, 0, ""},
+ {"ARPHRD_CSLIP", Const, 0, ""},
+ {"ARPHRD_CSLIP6", Const, 0, ""},
+ {"ARPHRD_DDCMP", Const, 0, ""},
+ {"ARPHRD_DLCI", Const, 0, ""},
+ {"ARPHRD_ECONET", Const, 0, ""},
+ {"ARPHRD_EETHER", Const, 0, ""},
+ {"ARPHRD_ETHER", Const, 0, ""},
+ {"ARPHRD_EUI64", Const, 0, ""},
+ {"ARPHRD_FCAL", Const, 0, ""},
+ {"ARPHRD_FCFABRIC", Const, 0, ""},
+ {"ARPHRD_FCPL", Const, 0, ""},
+ {"ARPHRD_FCPP", Const, 0, ""},
+ {"ARPHRD_FDDI", Const, 0, ""},
+ {"ARPHRD_FRAD", Const, 0, ""},
+ {"ARPHRD_FRELAY", Const, 1, ""},
+ {"ARPHRD_HDLC", Const, 0, ""},
+ {"ARPHRD_HIPPI", Const, 0, ""},
+ {"ARPHRD_HWX25", Const, 0, ""},
+ {"ARPHRD_IEEE1394", Const, 0, ""},
+ {"ARPHRD_IEEE802", Const, 0, ""},
+ {"ARPHRD_IEEE80211", Const, 0, ""},
+ {"ARPHRD_IEEE80211_PRISM", Const, 0, ""},
+ {"ARPHRD_IEEE80211_RADIOTAP", Const, 0, ""},
+ {"ARPHRD_IEEE802154", Const, 0, ""},
+ {"ARPHRD_IEEE802154_PHY", Const, 0, ""},
+ {"ARPHRD_IEEE802_TR", Const, 0, ""},
+ {"ARPHRD_INFINIBAND", Const, 0, ""},
+ {"ARPHRD_IPDDP", Const, 0, ""},
+ {"ARPHRD_IPGRE", Const, 0, ""},
+ {"ARPHRD_IRDA", Const, 0, ""},
+ {"ARPHRD_LAPB", Const, 0, ""},
+ {"ARPHRD_LOCALTLK", Const, 0, ""},
+ {"ARPHRD_LOOPBACK", Const, 0, ""},
+ {"ARPHRD_METRICOM", Const, 0, ""},
+ {"ARPHRD_NETROM", Const, 0, ""},
+ {"ARPHRD_NONE", Const, 0, ""},
+ {"ARPHRD_PIMREG", Const, 0, ""},
+ {"ARPHRD_PPP", Const, 0, ""},
+ {"ARPHRD_PRONET", Const, 0, ""},
+ {"ARPHRD_RAWHDLC", Const, 0, ""},
+ {"ARPHRD_ROSE", Const, 0, ""},
+ {"ARPHRD_RSRVD", Const, 0, ""},
+ {"ARPHRD_SIT", Const, 0, ""},
+ {"ARPHRD_SKIP", Const, 0, ""},
+ {"ARPHRD_SLIP", Const, 0, ""},
+ {"ARPHRD_SLIP6", Const, 0, ""},
+ {"ARPHRD_STRIP", Const, 1, ""},
+ {"ARPHRD_TUNNEL", Const, 0, ""},
+ {"ARPHRD_TUNNEL6", Const, 0, ""},
+ {"ARPHRD_VOID", Const, 0, ""},
+ {"ARPHRD_X25", Const, 0, ""},
+ {"AUTHTYPE_CLIENT", Const, 0, ""},
+ {"AUTHTYPE_SERVER", Const, 0, ""},
+ {"Accept", Func, 0, "func(fd int) (nfd int, sa Sockaddr, err error)"},
+ {"Accept4", Func, 1, "func(fd int, flags int) (nfd int, sa Sockaddr, err error)"},
+ {"AcceptEx", Func, 0, ""},
+ {"Access", Func, 0, "func(path string, mode uint32) (err error)"},
+ {"Acct", Func, 0, "func(path string) (err error)"},
+ {"AddrinfoW", Type, 1, ""},
+ {"AddrinfoW.Addr", Field, 1, ""},
+ {"AddrinfoW.Addrlen", Field, 1, ""},
+ {"AddrinfoW.Canonname", Field, 1, ""},
+ {"AddrinfoW.Family", Field, 1, ""},
+ {"AddrinfoW.Flags", Field, 1, ""},
+ {"AddrinfoW.Next", Field, 1, ""},
+ {"AddrinfoW.Protocol", Field, 1, ""},
+ {"AddrinfoW.Socktype", Field, 1, ""},
+ {"Adjtime", Func, 0, ""},
+ {"Adjtimex", Func, 0, "func(buf *Timex) (state int, err error)"},
+ {"AllThreadsSyscall", Func, 16, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+ {"AllThreadsSyscall6", Func, 16, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+ {"AttachLsf", Func, 0, "func(fd int, i []SockFilter) error"},
+ {"B0", Const, 0, ""},
+ {"B1000000", Const, 0, ""},
+ {"B110", Const, 0, ""},
+ {"B115200", Const, 0, ""},
+ {"B1152000", Const, 0, ""},
+ {"B1200", Const, 0, ""},
+ {"B134", Const, 0, ""},
+ {"B14400", Const, 1, ""},
+ {"B150", Const, 0, ""},
+ {"B1500000", Const, 0, ""},
+ {"B1800", Const, 0, ""},
+ {"B19200", Const, 0, ""},
+ {"B200", Const, 0, ""},
+ {"B2000000", Const, 0, ""},
+ {"B230400", Const, 0, ""},
+ {"B2400", Const, 0, ""},
+ {"B2500000", Const, 0, ""},
+ {"B28800", Const, 1, ""},
+ {"B300", Const, 0, ""},
+ {"B3000000", Const, 0, ""},
+ {"B3500000", Const, 0, ""},
+ {"B38400", Const, 0, ""},
+ {"B4000000", Const, 0, ""},
+ {"B460800", Const, 0, ""},
+ {"B4800", Const, 0, ""},
+ {"B50", Const, 0, ""},
+ {"B500000", Const, 0, ""},
+ {"B57600", Const, 0, ""},
+ {"B576000", Const, 0, ""},
+ {"B600", Const, 0, ""},
+ {"B7200", Const, 1, ""},
+ {"B75", Const, 0, ""},
+ {"B76800", Const, 1, ""},
+ {"B921600", Const, 0, ""},
+ {"B9600", Const, 0, ""},
+ {"BASE_PROTOCOL", Const, 2, ""},
+ {"BIOCFEEDBACK", Const, 0, ""},
+ {"BIOCFLUSH", Const, 0, ""},
+ {"BIOCGBLEN", Const, 0, ""},
+ {"BIOCGDIRECTION", Const, 0, ""},
+ {"BIOCGDIRFILT", Const, 1, ""},
+ {"BIOCGDLT", Const, 0, ""},
+ {"BIOCGDLTLIST", Const, 0, ""},
+ {"BIOCGETBUFMODE", Const, 0, ""},
+ {"BIOCGETIF", Const, 0, ""},
+ {"BIOCGETZMAX", Const, 0, ""},
+ {"BIOCGFEEDBACK", Const, 1, ""},
+ {"BIOCGFILDROP", Const, 1, ""},
+ {"BIOCGHDRCMPLT", Const, 0, ""},
+ {"BIOCGRSIG", Const, 0, ""},
+ {"BIOCGRTIMEOUT", Const, 0, ""},
+ {"BIOCGSEESENT", Const, 0, ""},
+ {"BIOCGSTATS", Const, 0, ""},
+ {"BIOCGSTATSOLD", Const, 1, ""},
+ {"BIOCGTSTAMP", Const, 1, ""},
+ {"BIOCIMMEDIATE", Const, 0, ""},
+ {"BIOCLOCK", Const, 0, ""},
+ {"BIOCPROMISC", Const, 0, ""},
+ {"BIOCROTZBUF", Const, 0, ""},
+ {"BIOCSBLEN", Const, 0, ""},
+ {"BIOCSDIRECTION", Const, 0, ""},
+ {"BIOCSDIRFILT", Const, 1, ""},
+ {"BIOCSDLT", Const, 0, ""},
+ {"BIOCSETBUFMODE", Const, 0, ""},
+ {"BIOCSETF", Const, 0, ""},
+ {"BIOCSETFNR", Const, 0, ""},
+ {"BIOCSETIF", Const, 0, ""},
+ {"BIOCSETWF", Const, 0, ""},
+ {"BIOCSETZBUF", Const, 0, ""},
+ {"BIOCSFEEDBACK", Const, 1, ""},
+ {"BIOCSFILDROP", Const, 1, ""},
+ {"BIOCSHDRCMPLT", Const, 0, ""},
+ {"BIOCSRSIG", Const, 0, ""},
+ {"BIOCSRTIMEOUT", Const, 0, ""},
+ {"BIOCSSEESENT", Const, 0, ""},
+ {"BIOCSTCPF", Const, 1, ""},
+ {"BIOCSTSTAMP", Const, 1, ""},
+ {"BIOCSUDPF", Const, 1, ""},
+ {"BIOCVERSION", Const, 0, ""},
+ {"BPF_A", Const, 0, ""},
+ {"BPF_ABS", Const, 0, ""},
+ {"BPF_ADD", Const, 0, ""},
+ {"BPF_ALIGNMENT", Const, 0, ""},
+ {"BPF_ALIGNMENT32", Const, 1, ""},
+ {"BPF_ALU", Const, 0, ""},
+ {"BPF_AND", Const, 0, ""},
+ {"BPF_B", Const, 0, ""},
+ {"BPF_BUFMODE_BUFFER", Const, 0, ""},
+ {"BPF_BUFMODE_ZBUF", Const, 0, ""},
+ {"BPF_DFLTBUFSIZE", Const, 1, ""},
+ {"BPF_DIRECTION_IN", Const, 1, ""},
+ {"BPF_DIRECTION_OUT", Const, 1, ""},
+ {"BPF_DIV", Const, 0, ""},
+ {"BPF_H", Const, 0, ""},
+ {"BPF_IMM", Const, 0, ""},
+ {"BPF_IND", Const, 0, ""},
+ {"BPF_JA", Const, 0, ""},
+ {"BPF_JEQ", Const, 0, ""},
+ {"BPF_JGE", Const, 0, ""},
+ {"BPF_JGT", Const, 0, ""},
+ {"BPF_JMP", Const, 0, ""},
+ {"BPF_JSET", Const, 0, ""},
+ {"BPF_K", Const, 0, ""},
+ {"BPF_LD", Const, 0, ""},
+ {"BPF_LDX", Const, 0, ""},
+ {"BPF_LEN", Const, 0, ""},
+ {"BPF_LSH", Const, 0, ""},
+ {"BPF_MAJOR_VERSION", Const, 0, ""},
+ {"BPF_MAXBUFSIZE", Const, 0, ""},
+ {"BPF_MAXINSNS", Const, 0, ""},
+ {"BPF_MEM", Const, 0, ""},
+ {"BPF_MEMWORDS", Const, 0, ""},
+ {"BPF_MINBUFSIZE", Const, 0, ""},
+ {"BPF_MINOR_VERSION", Const, 0, ""},
+ {"BPF_MISC", Const, 0, ""},
+ {"BPF_MSH", Const, 0, ""},
+ {"BPF_MUL", Const, 0, ""},
+ {"BPF_NEG", Const, 0, ""},
+ {"BPF_OR", Const, 0, ""},
+ {"BPF_RELEASE", Const, 0, ""},
+ {"BPF_RET", Const, 0, ""},
+ {"BPF_RSH", Const, 0, ""},
+ {"BPF_ST", Const, 0, ""},
+ {"BPF_STX", Const, 0, ""},
+ {"BPF_SUB", Const, 0, ""},
+ {"BPF_TAX", Const, 0, ""},
+ {"BPF_TXA", Const, 0, ""},
+ {"BPF_T_BINTIME", Const, 1, ""},
+ {"BPF_T_BINTIME_FAST", Const, 1, ""},
+ {"BPF_T_BINTIME_MONOTONIC", Const, 1, ""},
+ {"BPF_T_BINTIME_MONOTONIC_FAST", Const, 1, ""},
+ {"BPF_T_FAST", Const, 1, ""},
+ {"BPF_T_FLAG_MASK", Const, 1, ""},
+ {"BPF_T_FORMAT_MASK", Const, 1, ""},
+ {"BPF_T_MICROTIME", Const, 1, ""},
+ {"BPF_T_MICROTIME_FAST", Const, 1, ""},
+ {"BPF_T_MICROTIME_MONOTONIC", Const, 1, ""},
+ {"BPF_T_MICROTIME_MONOTONIC_FAST", Const, 1, ""},
+ {"BPF_T_MONOTONIC", Const, 1, ""},
+ {"BPF_T_MONOTONIC_FAST", Const, 1, ""},
+ {"BPF_T_NANOTIME", Const, 1, ""},
+ {"BPF_T_NANOTIME_FAST", Const, 1, ""},
+ {"BPF_T_NANOTIME_MONOTONIC", Const, 1, ""},
+ {"BPF_T_NANOTIME_MONOTONIC_FAST", Const, 1, ""},
+ {"BPF_T_NONE", Const, 1, ""},
+ {"BPF_T_NORMAL", Const, 1, ""},
+ {"BPF_W", Const, 0, ""},
+ {"BPF_X", Const, 0, ""},
+ {"BRKINT", Const, 0, ""},
+ {"Bind", Func, 0, "func(fd int, sa Sockaddr) (err error)"},
+ {"BindToDevice", Func, 0, "func(fd int, device string) (err error)"},
+ {"BpfBuflen", Func, 0, ""},
+ {"BpfDatalink", Func, 0, ""},
+ {"BpfHdr", Type, 0, ""},
+ {"BpfHdr.Caplen", Field, 0, ""},
+ {"BpfHdr.Datalen", Field, 0, ""},
+ {"BpfHdr.Hdrlen", Field, 0, ""},
+ {"BpfHdr.Pad_cgo_0", Field, 0, ""},
+ {"BpfHdr.Tstamp", Field, 0, ""},
+ {"BpfHeadercmpl", Func, 0, ""},
+ {"BpfInsn", Type, 0, ""},
+ {"BpfInsn.Code", Field, 0, ""},
+ {"BpfInsn.Jf", Field, 0, ""},
+ {"BpfInsn.Jt", Field, 0, ""},
+ {"BpfInsn.K", Field, 0, ""},
+ {"BpfInterface", Func, 0, ""},
+ {"BpfJump", Func, 0, ""},
+ {"BpfProgram", Type, 0, ""},
+ {"BpfProgram.Insns", Field, 0, ""},
+ {"BpfProgram.Len", Field, 0, ""},
+ {"BpfProgram.Pad_cgo_0", Field, 0, ""},
+ {"BpfStat", Type, 0, ""},
+ {"BpfStat.Capt", Field, 2, ""},
+ {"BpfStat.Drop", Field, 0, ""},
+ {"BpfStat.Padding", Field, 2, ""},
+ {"BpfStat.Recv", Field, 0, ""},
+ {"BpfStats", Func, 0, ""},
+ {"BpfStmt", Func, 0, ""},
+ {"BpfTimeout", Func, 0, ""},
+ {"BpfTimeval", Type, 2, ""},
+ {"BpfTimeval.Sec", Field, 2, ""},
+ {"BpfTimeval.Usec", Field, 2, ""},
+ {"BpfVersion", Type, 0, ""},
+ {"BpfVersion.Major", Field, 0, ""},
+ {"BpfVersion.Minor", Field, 0, ""},
+ {"BpfZbuf", Type, 0, ""},
+ {"BpfZbuf.Bufa", Field, 0, ""},
+ {"BpfZbuf.Bufb", Field, 0, ""},
+ {"BpfZbuf.Buflen", Field, 0, ""},
+ {"BpfZbufHeader", Type, 0, ""},
+ {"BpfZbufHeader.Kernel_gen", Field, 0, ""},
+ {"BpfZbufHeader.Kernel_len", Field, 0, ""},
+ {"BpfZbufHeader.User_gen", Field, 0, ""},
+ {"BpfZbufHeader.X_bzh_pad", Field, 0, ""},
+ {"ByHandleFileInformation", Type, 0, ""},
+ {"ByHandleFileInformation.CreationTime", Field, 0, ""},
+ {"ByHandleFileInformation.FileAttributes", Field, 0, ""},
+ {"ByHandleFileInformation.FileIndexHigh", Field, 0, ""},
+ {"ByHandleFileInformation.FileIndexLow", Field, 0, ""},
+ {"ByHandleFileInformation.FileSizeHigh", Field, 0, ""},
+ {"ByHandleFileInformation.FileSizeLow", Field, 0, ""},
+ {"ByHandleFileInformation.LastAccessTime", Field, 0, ""},
+ {"ByHandleFileInformation.LastWriteTime", Field, 0, ""},
+ {"ByHandleFileInformation.NumberOfLinks", Field, 0, ""},
+ {"ByHandleFileInformation.VolumeSerialNumber", Field, 0, ""},
+ {"BytePtrFromString", Func, 1, "func(s string) (*byte, error)"},
+ {"ByteSliceFromString", Func, 1, "func(s string) ([]byte, error)"},
+ {"CCR0_FLUSH", Const, 1, ""},
+ {"CERT_CHAIN_POLICY_AUTHENTICODE", Const, 0, ""},
+ {"CERT_CHAIN_POLICY_AUTHENTICODE_TS", Const, 0, ""},
+ {"CERT_CHAIN_POLICY_BASE", Const, 0, ""},
+ {"CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", Const, 0, ""},
+ {"CERT_CHAIN_POLICY_EV", Const, 0, ""},
+ {"CERT_CHAIN_POLICY_MICROSOFT_ROOT", Const, 0, ""},
+ {"CERT_CHAIN_POLICY_NT_AUTH", Const, 0, ""},
+ {"CERT_CHAIN_POLICY_SSL", Const, 0, ""},
+ {"CERT_E_CN_NO_MATCH", Const, 0, ""},
+ {"CERT_E_EXPIRED", Const, 0, ""},
+ {"CERT_E_PURPOSE", Const, 0, ""},
+ {"CERT_E_ROLE", Const, 0, ""},
+ {"CERT_E_UNTRUSTEDROOT", Const, 0, ""},
+ {"CERT_STORE_ADD_ALWAYS", Const, 0, ""},
+ {"CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", Const, 0, ""},
+ {"CERT_STORE_PROV_MEMORY", Const, 0, ""},
+ {"CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", Const, 0, ""},
+ {"CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", Const, 0, ""},
+ {"CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", Const, 0, ""},
+ {"CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", Const, 0, ""},
+ {"CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", Const, 0, ""},
+ {"CERT_TRUST_INVALID_BASIC_CONSTRAINTS", Const, 0, ""},
+ {"CERT_TRUST_INVALID_EXTENSION", Const, 0, ""},
+ {"CERT_TRUST_INVALID_NAME_CONSTRAINTS", Const, 0, ""},
+ {"CERT_TRUST_INVALID_POLICY_CONSTRAINTS", Const, 0, ""},
+ {"CERT_TRUST_IS_CYCLIC", Const, 0, ""},
+ {"CERT_TRUST_IS_EXPLICIT_DISTRUST", Const, 0, ""},
+ {"CERT_TRUST_IS_NOT_SIGNATURE_VALID", Const, 0, ""},
+ {"CERT_TRUST_IS_NOT_TIME_VALID", Const, 0, ""},
+ {"CERT_TRUST_IS_NOT_VALID_FOR_USAGE", Const, 0, ""},
+ {"CERT_TRUST_IS_OFFLINE_REVOCATION", Const, 0, ""},
+ {"CERT_TRUST_IS_REVOKED", Const, 0, ""},
+ {"CERT_TRUST_IS_UNTRUSTED_ROOT", Const, 0, ""},
+ {"CERT_TRUST_NO_ERROR", Const, 0, ""},
+ {"CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", Const, 0, ""},
+ {"CERT_TRUST_REVOCATION_STATUS_UNKNOWN", Const, 0, ""},
+ {"CFLUSH", Const, 1, ""},
+ {"CLOCAL", Const, 0, ""},
+ {"CLONE_CHILD_CLEARTID", Const, 2, ""},
+ {"CLONE_CHILD_SETTID", Const, 2, ""},
+ {"CLONE_CLEAR_SIGHAND", Const, 20, ""},
+ {"CLONE_CSIGNAL", Const, 3, ""},
+ {"CLONE_DETACHED", Const, 2, ""},
+ {"CLONE_FILES", Const, 2, ""},
+ {"CLONE_FS", Const, 2, ""},
+ {"CLONE_INTO_CGROUP", Const, 20, ""},
+ {"CLONE_IO", Const, 2, ""},
+ {"CLONE_NEWCGROUP", Const, 20, ""},
+ {"CLONE_NEWIPC", Const, 2, ""},
+ {"CLONE_NEWNET", Const, 2, ""},
+ {"CLONE_NEWNS", Const, 2, ""},
+ {"CLONE_NEWPID", Const, 2, ""},
+ {"CLONE_NEWTIME", Const, 20, ""},
+ {"CLONE_NEWUSER", Const, 2, ""},
+ {"CLONE_NEWUTS", Const, 2, ""},
+ {"CLONE_PARENT", Const, 2, ""},
+ {"CLONE_PARENT_SETTID", Const, 2, ""},
+ {"CLONE_PID", Const, 3, ""},
+ {"CLONE_PIDFD", Const, 20, ""},
+ {"CLONE_PTRACE", Const, 2, ""},
+ {"CLONE_SETTLS", Const, 2, ""},
+ {"CLONE_SIGHAND", Const, 2, ""},
+ {"CLONE_SYSVSEM", Const, 2, ""},
+ {"CLONE_THREAD", Const, 2, ""},
+ {"CLONE_UNTRACED", Const, 2, ""},
+ {"CLONE_VFORK", Const, 2, ""},
+ {"CLONE_VM", Const, 2, ""},
+ {"CPUID_CFLUSH", Const, 1, ""},
+ {"CREAD", Const, 0, ""},
+ {"CREATE_ALWAYS", Const, 0, ""},
+ {"CREATE_NEW", Const, 0, ""},
+ {"CREATE_NEW_PROCESS_GROUP", Const, 1, ""},
+ {"CREATE_UNICODE_ENVIRONMENT", Const, 0, ""},
+ {"CRYPT_DEFAULT_CONTAINER_OPTIONAL", Const, 0, ""},
+ {"CRYPT_DELETEKEYSET", Const, 0, ""},
+ {"CRYPT_MACHINE_KEYSET", Const, 0, ""},
+ {"CRYPT_NEWKEYSET", Const, 0, ""},
+ {"CRYPT_SILENT", Const, 0, ""},
+ {"CRYPT_VERIFYCONTEXT", Const, 0, ""},
+ {"CS5", Const, 0, ""},
+ {"CS6", Const, 0, ""},
+ {"CS7", Const, 0, ""},
+ {"CS8", Const, 0, ""},
+ {"CSIZE", Const, 0, ""},
+ {"CSTART", Const, 1, ""},
+ {"CSTATUS", Const, 1, ""},
+ {"CSTOP", Const, 1, ""},
+ {"CSTOPB", Const, 0, ""},
+ {"CSUSP", Const, 1, ""},
+ {"CTL_MAXNAME", Const, 0, ""},
+ {"CTL_NET", Const, 0, ""},
+ {"CTL_QUERY", Const, 1, ""},
+ {"CTRL_BREAK_EVENT", Const, 1, ""},
+ {"CTRL_CLOSE_EVENT", Const, 14, ""},
+ {"CTRL_C_EVENT", Const, 1, ""},
+ {"CTRL_LOGOFF_EVENT", Const, 14, ""},
+ {"CTRL_SHUTDOWN_EVENT", Const, 14, ""},
+ {"CancelIo", Func, 0, ""},
+ {"CancelIoEx", Func, 1, ""},
+ {"CertAddCertificateContextToStore", Func, 0, ""},
+ {"CertChainContext", Type, 0, ""},
+ {"CertChainContext.ChainCount", Field, 0, ""},
+ {"CertChainContext.Chains", Field, 0, ""},
+ {"CertChainContext.HasRevocationFreshnessTime", Field, 0, ""},
+ {"CertChainContext.LowerQualityChainCount", Field, 0, ""},
+ {"CertChainContext.LowerQualityChains", Field, 0, ""},
+ {"CertChainContext.RevocationFreshnessTime", Field, 0, ""},
+ {"CertChainContext.Size", Field, 0, ""},
+ {"CertChainContext.TrustStatus", Field, 0, ""},
+ {"CertChainElement", Type, 0, ""},
+ {"CertChainElement.ApplicationUsage", Field, 0, ""},
+ {"CertChainElement.CertContext", Field, 0, ""},
+ {"CertChainElement.ExtendedErrorInfo", Field, 0, ""},
+ {"CertChainElement.IssuanceUsage", Field, 0, ""},
+ {"CertChainElement.RevocationInfo", Field, 0, ""},
+ {"CertChainElement.Size", Field, 0, ""},
+ {"CertChainElement.TrustStatus", Field, 0, ""},
+ {"CertChainPara", Type, 0, ""},
+ {"CertChainPara.CacheResync", Field, 0, ""},
+ {"CertChainPara.CheckRevocationFreshnessTime", Field, 0, ""},
+ {"CertChainPara.RequestedUsage", Field, 0, ""},
+ {"CertChainPara.RequstedIssuancePolicy", Field, 0, ""},
+ {"CertChainPara.RevocationFreshnessTime", Field, 0, ""},
+ {"CertChainPara.Size", Field, 0, ""},
+ {"CertChainPara.URLRetrievalTimeout", Field, 0, ""},
+ {"CertChainPolicyPara", Type, 0, ""},
+ {"CertChainPolicyPara.ExtraPolicyPara", Field, 0, ""},
+ {"CertChainPolicyPara.Flags", Field, 0, ""},
+ {"CertChainPolicyPara.Size", Field, 0, ""},
+ {"CertChainPolicyStatus", Type, 0, ""},
+ {"CertChainPolicyStatus.ChainIndex", Field, 0, ""},
+ {"CertChainPolicyStatus.ElementIndex", Field, 0, ""},
+ {"CertChainPolicyStatus.Error", Field, 0, ""},
+ {"CertChainPolicyStatus.ExtraPolicyStatus", Field, 0, ""},
+ {"CertChainPolicyStatus.Size", Field, 0, ""},
+ {"CertCloseStore", Func, 0, ""},
+ {"CertContext", Type, 0, ""},
+ {"CertContext.CertInfo", Field, 0, ""},
+ {"CertContext.EncodedCert", Field, 0, ""},
+ {"CertContext.EncodingType", Field, 0, ""},
+ {"CertContext.Length", Field, 0, ""},
+ {"CertContext.Store", Field, 0, ""},
+ {"CertCreateCertificateContext", Func, 0, ""},
+ {"CertEnhKeyUsage", Type, 0, ""},
+ {"CertEnhKeyUsage.Length", Field, 0, ""},
+ {"CertEnhKeyUsage.UsageIdentifiers", Field, 0, ""},
+ {"CertEnumCertificatesInStore", Func, 0, ""},
+ {"CertFreeCertificateChain", Func, 0, ""},
+ {"CertFreeCertificateContext", Func, 0, ""},
+ {"CertGetCertificateChain", Func, 0, ""},
+ {"CertInfo", Type, 11, ""},
+ {"CertOpenStore", Func, 0, ""},
+ {"CertOpenSystemStore", Func, 0, ""},
+ {"CertRevocationCrlInfo", Type, 11, ""},
+ {"CertRevocationInfo", Type, 0, ""},
+ {"CertRevocationInfo.CrlInfo", Field, 0, ""},
+ {"CertRevocationInfo.FreshnessTime", Field, 0, ""},
+ {"CertRevocationInfo.HasFreshnessTime", Field, 0, ""},
+ {"CertRevocationInfo.OidSpecificInfo", Field, 0, ""},
+ {"CertRevocationInfo.RevocationOid", Field, 0, ""},
+ {"CertRevocationInfo.RevocationResult", Field, 0, ""},
+ {"CertRevocationInfo.Size", Field, 0, ""},
+ {"CertSimpleChain", Type, 0, ""},
+ {"CertSimpleChain.Elements", Field, 0, ""},
+ {"CertSimpleChain.HasRevocationFreshnessTime", Field, 0, ""},
+ {"CertSimpleChain.NumElements", Field, 0, ""},
+ {"CertSimpleChain.RevocationFreshnessTime", Field, 0, ""},
+ {"CertSimpleChain.Size", Field, 0, ""},
+ {"CertSimpleChain.TrustListInfo", Field, 0, ""},
+ {"CertSimpleChain.TrustStatus", Field, 0, ""},
+ {"CertTrustListInfo", Type, 11, ""},
+ {"CertTrustStatus", Type, 0, ""},
+ {"CertTrustStatus.ErrorStatus", Field, 0, ""},
+ {"CertTrustStatus.InfoStatus", Field, 0, ""},
+ {"CertUsageMatch", Type, 0, ""},
+ {"CertUsageMatch.Type", Field, 0, ""},
+ {"CertUsageMatch.Usage", Field, 0, ""},
+ {"CertVerifyCertificateChainPolicy", Func, 0, ""},
+ {"Chdir", Func, 0, "func(path string) (err error)"},
+ {"CheckBpfVersion", Func, 0, ""},
+ {"Chflags", Func, 0, ""},
+ {"Chmod", Func, 0, "func(path string, mode uint32) (err error)"},
+ {"Chown", Func, 0, "func(path string, uid int, gid int) (err error)"},
+ {"Chroot", Func, 0, "func(path string) (err error)"},
+ {"Clearenv", Func, 0, "func()"},
+ {"Close", Func, 0, "func(fd int) (err error)"},
+ {"CloseHandle", Func, 0, ""},
+ {"CloseOnExec", Func, 0, "func(fd int)"},
+ {"Closesocket", Func, 0, ""},
+ {"CmsgLen", Func, 0, "func(datalen int) int"},
+ {"CmsgSpace", Func, 0, "func(datalen int) int"},
+ {"Cmsghdr", Type, 0, ""},
+ {"Cmsghdr.Len", Field, 0, ""},
+ {"Cmsghdr.Level", Field, 0, ""},
+ {"Cmsghdr.Type", Field, 0, ""},
+ {"Cmsghdr.X__cmsg_data", Field, 0, ""},
+ {"CommandLineToArgv", Func, 0, ""},
+ {"ComputerName", Func, 0, ""},
+ {"Conn", Type, 9, ""},
+ {"Connect", Func, 0, "func(fd int, sa Sockaddr) (err error)"},
+ {"ConnectEx", Func, 1, ""},
+ {"ConvertSidToStringSid", Func, 0, ""},
+ {"ConvertStringSidToSid", Func, 0, ""},
+ {"CopySid", Func, 0, ""},
+ {"Creat", Func, 0, "func(path string, mode uint32) (fd int, err error)"},
+ {"CreateDirectory", Func, 0, ""},
+ {"CreateFile", Func, 0, ""},
+ {"CreateFileMapping", Func, 0, ""},
+ {"CreateHardLink", Func, 4, ""},
+ {"CreateIoCompletionPort", Func, 0, ""},
+ {"CreatePipe", Func, 0, ""},
+ {"CreateProcess", Func, 0, ""},
+ {"CreateProcessAsUser", Func, 10, ""},
+ {"CreateSymbolicLink", Func, 4, ""},
+ {"CreateToolhelp32Snapshot", Func, 4, ""},
+ {"Credential", Type, 0, ""},
+ {"Credential.Gid", Field, 0, ""},
+ {"Credential.Groups", Field, 0, ""},
+ {"Credential.NoSetGroups", Field, 9, ""},
+ {"Credential.Uid", Field, 0, ""},
+ {"CryptAcquireContext", Func, 0, ""},
+ {"CryptGenRandom", Func, 0, ""},
+ {"CryptReleaseContext", Func, 0, ""},
+ {"DIOCBSFLUSH", Const, 1, ""},
+ {"DIOCOSFPFLUSH", Const, 1, ""},
+ {"DLL", Type, 0, ""},
+ {"DLL.Handle", Field, 0, ""},
+ {"DLL.Name", Field, 0, ""},
+ {"DLLError", Type, 0, ""},
+ {"DLLError.Err", Field, 0, ""},
+ {"DLLError.Msg", Field, 0, ""},
+ {"DLLError.ObjName", Field, 0, ""},
+ {"DLT_A429", Const, 0, ""},
+ {"DLT_A653_ICM", Const, 0, ""},
+ {"DLT_AIRONET_HEADER", Const, 0, ""},
+ {"DLT_AOS", Const, 1, ""},
+ {"DLT_APPLE_IP_OVER_IEEE1394", Const, 0, ""},
+ {"DLT_ARCNET", Const, 0, ""},
+ {"DLT_ARCNET_LINUX", Const, 0, ""},
+ {"DLT_ATM_CLIP", Const, 0, ""},
+ {"DLT_ATM_RFC1483", Const, 0, ""},
+ {"DLT_AURORA", Const, 0, ""},
+ {"DLT_AX25", Const, 0, ""},
+ {"DLT_AX25_KISS", Const, 0, ""},
+ {"DLT_BACNET_MS_TP", Const, 0, ""},
+ {"DLT_BLUETOOTH_HCI_H4", Const, 0, ""},
+ {"DLT_BLUETOOTH_HCI_H4_WITH_PHDR", Const, 0, ""},
+ {"DLT_CAN20B", Const, 0, ""},
+ {"DLT_CAN_SOCKETCAN", Const, 1, ""},
+ {"DLT_CHAOS", Const, 0, ""},
+ {"DLT_CHDLC", Const, 0, ""},
+ {"DLT_CISCO_IOS", Const, 0, ""},
+ {"DLT_C_HDLC", Const, 0, ""},
+ {"DLT_C_HDLC_WITH_DIR", Const, 0, ""},
+ {"DLT_DBUS", Const, 1, ""},
+ {"DLT_DECT", Const, 1, ""},
+ {"DLT_DOCSIS", Const, 0, ""},
+ {"DLT_DVB_CI", Const, 1, ""},
+ {"DLT_ECONET", Const, 0, ""},
+ {"DLT_EN10MB", Const, 0, ""},
+ {"DLT_EN3MB", Const, 0, ""},
+ {"DLT_ENC", Const, 0, ""},
+ {"DLT_ERF", Const, 0, ""},
+ {"DLT_ERF_ETH", Const, 0, ""},
+ {"DLT_ERF_POS", Const, 0, ""},
+ {"DLT_FC_2", Const, 1, ""},
+ {"DLT_FC_2_WITH_FRAME_DELIMS", Const, 1, ""},
+ {"DLT_FDDI", Const, 0, ""},
+ {"DLT_FLEXRAY", Const, 0, ""},
+ {"DLT_FRELAY", Const, 0, ""},
+ {"DLT_FRELAY_WITH_DIR", Const, 0, ""},
+ {"DLT_GCOM_SERIAL", Const, 0, ""},
+ {"DLT_GCOM_T1E1", Const, 0, ""},
+ {"DLT_GPF_F", Const, 0, ""},
+ {"DLT_GPF_T", Const, 0, ""},
+ {"DLT_GPRS_LLC", Const, 0, ""},
+ {"DLT_GSMTAP_ABIS", Const, 1, ""},
+ {"DLT_GSMTAP_UM", Const, 1, ""},
+ {"DLT_HDLC", Const, 1, ""},
+ {"DLT_HHDLC", Const, 0, ""},
+ {"DLT_HIPPI", Const, 1, ""},
+ {"DLT_IBM_SN", Const, 0, ""},
+ {"DLT_IBM_SP", Const, 0, ""},
+ {"DLT_IEEE802", Const, 0, ""},
+ {"DLT_IEEE802_11", Const, 0, ""},
+ {"DLT_IEEE802_11_RADIO", Const, 0, ""},
+ {"DLT_IEEE802_11_RADIO_AVS", Const, 0, ""},
+ {"DLT_IEEE802_15_4", Const, 0, ""},
+ {"DLT_IEEE802_15_4_LINUX", Const, 0, ""},
+ {"DLT_IEEE802_15_4_NOFCS", Const, 1, ""},
+ {"DLT_IEEE802_15_4_NONASK_PHY", Const, 0, ""},
+ {"DLT_IEEE802_16_MAC_CPS", Const, 0, ""},
+ {"DLT_IEEE802_16_MAC_CPS_RADIO", Const, 0, ""},
+ {"DLT_IPFILTER", Const, 0, ""},
+ {"DLT_IPMB", Const, 0, ""},
+ {"DLT_IPMB_LINUX", Const, 0, ""},
+ {"DLT_IPNET", Const, 1, ""},
+ {"DLT_IPOIB", Const, 1, ""},
+ {"DLT_IPV4", Const, 1, ""},
+ {"DLT_IPV6", Const, 1, ""},
+ {"DLT_IP_OVER_FC", Const, 0, ""},
+ {"DLT_JUNIPER_ATM1", Const, 0, ""},
+ {"DLT_JUNIPER_ATM2", Const, 0, ""},
+ {"DLT_JUNIPER_ATM_CEMIC", Const, 1, ""},
+ {"DLT_JUNIPER_CHDLC", Const, 0, ""},
+ {"DLT_JUNIPER_ES", Const, 0, ""},
+ {"DLT_JUNIPER_ETHER", Const, 0, ""},
+ {"DLT_JUNIPER_FIBRECHANNEL", Const, 1, ""},
+ {"DLT_JUNIPER_FRELAY", Const, 0, ""},
+ {"DLT_JUNIPER_GGSN", Const, 0, ""},
+ {"DLT_JUNIPER_ISM", Const, 0, ""},
+ {"DLT_JUNIPER_MFR", Const, 0, ""},
+ {"DLT_JUNIPER_MLFR", Const, 0, ""},
+ {"DLT_JUNIPER_MLPPP", Const, 0, ""},
+ {"DLT_JUNIPER_MONITOR", Const, 0, ""},
+ {"DLT_JUNIPER_PIC_PEER", Const, 0, ""},
+ {"DLT_JUNIPER_PPP", Const, 0, ""},
+ {"DLT_JUNIPER_PPPOE", Const, 0, ""},
+ {"DLT_JUNIPER_PPPOE_ATM", Const, 0, ""},
+ {"DLT_JUNIPER_SERVICES", Const, 0, ""},
+ {"DLT_JUNIPER_SRX_E2E", Const, 1, ""},
+ {"DLT_JUNIPER_ST", Const, 0, ""},
+ {"DLT_JUNIPER_VP", Const, 0, ""},
+ {"DLT_JUNIPER_VS", Const, 1, ""},
+ {"DLT_LAPB_WITH_DIR", Const, 0, ""},
+ {"DLT_LAPD", Const, 0, ""},
+ {"DLT_LIN", Const, 0, ""},
+ {"DLT_LINUX_EVDEV", Const, 1, ""},
+ {"DLT_LINUX_IRDA", Const, 0, ""},
+ {"DLT_LINUX_LAPD", Const, 0, ""},
+ {"DLT_LINUX_PPP_WITHDIRECTION", Const, 0, ""},
+ {"DLT_LINUX_SLL", Const, 0, ""},
+ {"DLT_LOOP", Const, 0, ""},
+ {"DLT_LTALK", Const, 0, ""},
+ {"DLT_MATCHING_MAX", Const, 1, ""},
+ {"DLT_MATCHING_MIN", Const, 1, ""},
+ {"DLT_MFR", Const, 0, ""},
+ {"DLT_MOST", Const, 0, ""},
+ {"DLT_MPEG_2_TS", Const, 1, ""},
+ {"DLT_MPLS", Const, 1, ""},
+ {"DLT_MTP2", Const, 0, ""},
+ {"DLT_MTP2_WITH_PHDR", Const, 0, ""},
+ {"DLT_MTP3", Const, 0, ""},
+ {"DLT_MUX27010", Const, 1, ""},
+ {"DLT_NETANALYZER", Const, 1, ""},
+ {"DLT_NETANALYZER_TRANSPARENT", Const, 1, ""},
+ {"DLT_NFC_LLCP", Const, 1, ""},
+ {"DLT_NFLOG", Const, 1, ""},
+ {"DLT_NG40", Const, 1, ""},
+ {"DLT_NULL", Const, 0, ""},
+ {"DLT_PCI_EXP", Const, 0, ""},
+ {"DLT_PFLOG", Const, 0, ""},
+ {"DLT_PFSYNC", Const, 0, ""},
+ {"DLT_PPI", Const, 0, ""},
+ {"DLT_PPP", Const, 0, ""},
+ {"DLT_PPP_BSDOS", Const, 0, ""},
+ {"DLT_PPP_ETHER", Const, 0, ""},
+ {"DLT_PPP_PPPD", Const, 0, ""},
+ {"DLT_PPP_SERIAL", Const, 0, ""},
+ {"DLT_PPP_WITH_DIR", Const, 0, ""},
+ {"DLT_PPP_WITH_DIRECTION", Const, 0, ""},
+ {"DLT_PRISM_HEADER", Const, 0, ""},
+ {"DLT_PRONET", Const, 0, ""},
+ {"DLT_RAIF1", Const, 0, ""},
+ {"DLT_RAW", Const, 0, ""},
+ {"DLT_RAWAF_MASK", Const, 1, ""},
+ {"DLT_RIO", Const, 0, ""},
+ {"DLT_SCCP", Const, 0, ""},
+ {"DLT_SITA", Const, 0, ""},
+ {"DLT_SLIP", Const, 0, ""},
+ {"DLT_SLIP_BSDOS", Const, 0, ""},
+ {"DLT_STANAG_5066_D_PDU", Const, 1, ""},
+ {"DLT_SUNATM", Const, 0, ""},
+ {"DLT_SYMANTEC_FIREWALL", Const, 0, ""},
+ {"DLT_TZSP", Const, 0, ""},
+ {"DLT_USB", Const, 0, ""},
+ {"DLT_USB_LINUX", Const, 0, ""},
+ {"DLT_USB_LINUX_MMAPPED", Const, 1, ""},
+ {"DLT_USER0", Const, 0, ""},
+ {"DLT_USER1", Const, 0, ""},
+ {"DLT_USER10", Const, 0, ""},
+ {"DLT_USER11", Const, 0, ""},
+ {"DLT_USER12", Const, 0, ""},
+ {"DLT_USER13", Const, 0, ""},
+ {"DLT_USER14", Const, 0, ""},
+ {"DLT_USER15", Const, 0, ""},
+ {"DLT_USER2", Const, 0, ""},
+ {"DLT_USER3", Const, 0, ""},
+ {"DLT_USER4", Const, 0, ""},
+ {"DLT_USER5", Const, 0, ""},
+ {"DLT_USER6", Const, 0, ""},
+ {"DLT_USER7", Const, 0, ""},
+ {"DLT_USER8", Const, 0, ""},
+ {"DLT_USER9", Const, 0, ""},
+ {"DLT_WIHART", Const, 1, ""},
+ {"DLT_X2E_SERIAL", Const, 0, ""},
+ {"DLT_X2E_XORAYA", Const, 0, ""},
+ {"DNSMXData", Type, 0, ""},
+ {"DNSMXData.NameExchange", Field, 0, ""},
+ {"DNSMXData.Pad", Field, 0, ""},
+ {"DNSMXData.Preference", Field, 0, ""},
+ {"DNSPTRData", Type, 0, ""},
+ {"DNSPTRData.Host", Field, 0, ""},
+ {"DNSRecord", Type, 0, ""},
+ {"DNSRecord.Data", Field, 0, ""},
+ {"DNSRecord.Dw", Field, 0, ""},
+ {"DNSRecord.Length", Field, 0, ""},
+ {"DNSRecord.Name", Field, 0, ""},
+ {"DNSRecord.Next", Field, 0, ""},
+ {"DNSRecord.Reserved", Field, 0, ""},
+ {"DNSRecord.Ttl", Field, 0, ""},
+ {"DNSRecord.Type", Field, 0, ""},
+ {"DNSSRVData", Type, 0, ""},
+ {"DNSSRVData.Pad", Field, 0, ""},
+ {"DNSSRVData.Port", Field, 0, ""},
+ {"DNSSRVData.Priority", Field, 0, ""},
+ {"DNSSRVData.Target", Field, 0, ""},
+ {"DNSSRVData.Weight", Field, 0, ""},
+ {"DNSTXTData", Type, 0, ""},
+ {"DNSTXTData.StringArray", Field, 0, ""},
+ {"DNSTXTData.StringCount", Field, 0, ""},
+ {"DNS_INFO_NO_RECORDS", Const, 4, ""},
+ {"DNS_TYPE_A", Const, 0, ""},
+ {"DNS_TYPE_A6", Const, 0, ""},
+ {"DNS_TYPE_AAAA", Const, 0, ""},
+ {"DNS_TYPE_ADDRS", Const, 0, ""},
+ {"DNS_TYPE_AFSDB", Const, 0, ""},
+ {"DNS_TYPE_ALL", Const, 0, ""},
+ {"DNS_TYPE_ANY", Const, 0, ""},
+ {"DNS_TYPE_ATMA", Const, 0, ""},
+ {"DNS_TYPE_AXFR", Const, 0, ""},
+ {"DNS_TYPE_CERT", Const, 0, ""},
+ {"DNS_TYPE_CNAME", Const, 0, ""},
+ {"DNS_TYPE_DHCID", Const, 0, ""},
+ {"DNS_TYPE_DNAME", Const, 0, ""},
+ {"DNS_TYPE_DNSKEY", Const, 0, ""},
+ {"DNS_TYPE_DS", Const, 0, ""},
+ {"DNS_TYPE_EID", Const, 0, ""},
+ {"DNS_TYPE_GID", Const, 0, ""},
+ {"DNS_TYPE_GPOS", Const, 0, ""},
+ {"DNS_TYPE_HINFO", Const, 0, ""},
+ {"DNS_TYPE_ISDN", Const, 0, ""},
+ {"DNS_TYPE_IXFR", Const, 0, ""},
+ {"DNS_TYPE_KEY", Const, 0, ""},
+ {"DNS_TYPE_KX", Const, 0, ""},
+ {"DNS_TYPE_LOC", Const, 0, ""},
+ {"DNS_TYPE_MAILA", Const, 0, ""},
+ {"DNS_TYPE_MAILB", Const, 0, ""},
+ {"DNS_TYPE_MB", Const, 0, ""},
+ {"DNS_TYPE_MD", Const, 0, ""},
+ {"DNS_TYPE_MF", Const, 0, ""},
+ {"DNS_TYPE_MG", Const, 0, ""},
+ {"DNS_TYPE_MINFO", Const, 0, ""},
+ {"DNS_TYPE_MR", Const, 0, ""},
+ {"DNS_TYPE_MX", Const, 0, ""},
+ {"DNS_TYPE_NAPTR", Const, 0, ""},
+ {"DNS_TYPE_NBSTAT", Const, 0, ""},
+ {"DNS_TYPE_NIMLOC", Const, 0, ""},
+ {"DNS_TYPE_NS", Const, 0, ""},
+ {"DNS_TYPE_NSAP", Const, 0, ""},
+ {"DNS_TYPE_NSAPPTR", Const, 0, ""},
+ {"DNS_TYPE_NSEC", Const, 0, ""},
+ {"DNS_TYPE_NULL", Const, 0, ""},
+ {"DNS_TYPE_NXT", Const, 0, ""},
+ {"DNS_TYPE_OPT", Const, 0, ""},
+ {"DNS_TYPE_PTR", Const, 0, ""},
+ {"DNS_TYPE_PX", Const, 0, ""},
+ {"DNS_TYPE_RP", Const, 0, ""},
+ {"DNS_TYPE_RRSIG", Const, 0, ""},
+ {"DNS_TYPE_RT", Const, 0, ""},
+ {"DNS_TYPE_SIG", Const, 0, ""},
+ {"DNS_TYPE_SINK", Const, 0, ""},
+ {"DNS_TYPE_SOA", Const, 0, ""},
+ {"DNS_TYPE_SRV", Const, 0, ""},
+ {"DNS_TYPE_TEXT", Const, 0, ""},
+ {"DNS_TYPE_TKEY", Const, 0, ""},
+ {"DNS_TYPE_TSIG", Const, 0, ""},
+ {"DNS_TYPE_UID", Const, 0, ""},
+ {"DNS_TYPE_UINFO", Const, 0, ""},
+ {"DNS_TYPE_UNSPEC", Const, 0, ""},
+ {"DNS_TYPE_WINS", Const, 0, ""},
+ {"DNS_TYPE_WINSR", Const, 0, ""},
+ {"DNS_TYPE_WKS", Const, 0, ""},
+ {"DNS_TYPE_X25", Const, 0, ""},
+ {"DT_BLK", Const, 0, ""},
+ {"DT_CHR", Const, 0, ""},
+ {"DT_DIR", Const, 0, ""},
+ {"DT_FIFO", Const, 0, ""},
+ {"DT_LNK", Const, 0, ""},
+ {"DT_REG", Const, 0, ""},
+ {"DT_SOCK", Const, 0, ""},
+ {"DT_UNKNOWN", Const, 0, ""},
+ {"DT_WHT", Const, 0, ""},
+ {"DUPLICATE_CLOSE_SOURCE", Const, 0, ""},
+ {"DUPLICATE_SAME_ACCESS", Const, 0, ""},
+ {"DeleteFile", Func, 0, ""},
+ {"DetachLsf", Func, 0, "func(fd int) error"},
+ {"DeviceIoControl", Func, 4, ""},
+ {"Dirent", Type, 0, ""},
+ {"Dirent.Fileno", Field, 0, ""},
+ {"Dirent.Ino", Field, 0, ""},
+ {"Dirent.Name", Field, 0, ""},
+ {"Dirent.Namlen", Field, 0, ""},
+ {"Dirent.Off", Field, 0, ""},
+ {"Dirent.Pad0", Field, 12, ""},
+ {"Dirent.Pad1", Field, 12, ""},
+ {"Dirent.Pad_cgo_0", Field, 0, ""},
+ {"Dirent.Reclen", Field, 0, ""},
+ {"Dirent.Seekoff", Field, 0, ""},
+ {"Dirent.Type", Field, 0, ""},
+ {"Dirent.X__d_padding", Field, 3, ""},
+ {"DnsNameCompare", Func, 4, ""},
+ {"DnsQuery", Func, 0, ""},
+ {"DnsRecordListFree", Func, 0, ""},
+ {"DnsSectionAdditional", Const, 4, ""},
+ {"DnsSectionAnswer", Const, 4, ""},
+ {"DnsSectionAuthority", Const, 4, ""},
+ {"DnsSectionQuestion", Const, 4, ""},
+ {"Dup", Func, 0, "func(oldfd int) (fd int, err error)"},
+ {"Dup2", Func, 0, "func(oldfd int, newfd int) (err error)"},
+ {"Dup3", Func, 2, "func(oldfd int, newfd int, flags int) (err error)"},
+ {"DuplicateHandle", Func, 0, ""},
+ {"E2BIG", Const, 0, ""},
+ {"EACCES", Const, 0, ""},
+ {"EADDRINUSE", Const, 0, ""},
+ {"EADDRNOTAVAIL", Const, 0, ""},
+ {"EADV", Const, 0, ""},
+ {"EAFNOSUPPORT", Const, 0, ""},
+ {"EAGAIN", Const, 0, ""},
+ {"EALREADY", Const, 0, ""},
+ {"EAUTH", Const, 0, ""},
+ {"EBADARCH", Const, 0, ""},
+ {"EBADE", Const, 0, ""},
+ {"EBADEXEC", Const, 0, ""},
+ {"EBADF", Const, 0, ""},
+ {"EBADFD", Const, 0, ""},
+ {"EBADMACHO", Const, 0, ""},
+ {"EBADMSG", Const, 0, ""},
+ {"EBADR", Const, 0, ""},
+ {"EBADRPC", Const, 0, ""},
+ {"EBADRQC", Const, 0, ""},
+ {"EBADSLT", Const, 0, ""},
+ {"EBFONT", Const, 0, ""},
+ {"EBUSY", Const, 0, ""},
+ {"ECANCELED", Const, 0, ""},
+ {"ECAPMODE", Const, 1, ""},
+ {"ECHILD", Const, 0, ""},
+ {"ECHO", Const, 0, ""},
+ {"ECHOCTL", Const, 0, ""},
+ {"ECHOE", Const, 0, ""},
+ {"ECHOK", Const, 0, ""},
+ {"ECHOKE", Const, 0, ""},
+ {"ECHONL", Const, 0, ""},
+ {"ECHOPRT", Const, 0, ""},
+ {"ECHRNG", Const, 0, ""},
+ {"ECOMM", Const, 0, ""},
+ {"ECONNABORTED", Const, 0, ""},
+ {"ECONNREFUSED", Const, 0, ""},
+ {"ECONNRESET", Const, 0, ""},
+ {"EDEADLK", Const, 0, ""},
+ {"EDEADLOCK", Const, 0, ""},
+ {"EDESTADDRREQ", Const, 0, ""},
+ {"EDEVERR", Const, 0, ""},
+ {"EDOM", Const, 0, ""},
+ {"EDOOFUS", Const, 0, ""},
+ {"EDOTDOT", Const, 0, ""},
+ {"EDQUOT", Const, 0, ""},
+ {"EEXIST", Const, 0, ""},
+ {"EFAULT", Const, 0, ""},
+ {"EFBIG", Const, 0, ""},
+ {"EFER_LMA", Const, 1, ""},
+ {"EFER_LME", Const, 1, ""},
+ {"EFER_NXE", Const, 1, ""},
+ {"EFER_SCE", Const, 1, ""},
+ {"EFTYPE", Const, 0, ""},
+ {"EHOSTDOWN", Const, 0, ""},
+ {"EHOSTUNREACH", Const, 0, ""},
+ {"EHWPOISON", Const, 0, ""},
+ {"EIDRM", Const, 0, ""},
+ {"EILSEQ", Const, 0, ""},
+ {"EINPROGRESS", Const, 0, ""},
+ {"EINTR", Const, 0, ""},
+ {"EINVAL", Const, 0, ""},
+ {"EIO", Const, 0, ""},
+ {"EIPSEC", Const, 1, ""},
+ {"EISCONN", Const, 0, ""},
+ {"EISDIR", Const, 0, ""},
+ {"EISNAM", Const, 0, ""},
+ {"EKEYEXPIRED", Const, 0, ""},
+ {"EKEYREJECTED", Const, 0, ""},
+ {"EKEYREVOKED", Const, 0, ""},
+ {"EL2HLT", Const, 0, ""},
+ {"EL2NSYNC", Const, 0, ""},
+ {"EL3HLT", Const, 0, ""},
+ {"EL3RST", Const, 0, ""},
+ {"ELAST", Const, 0, ""},
+ {"ELF_NGREG", Const, 0, ""},
+ {"ELF_PRARGSZ", Const, 0, ""},
+ {"ELIBACC", Const, 0, ""},
+ {"ELIBBAD", Const, 0, ""},
+ {"ELIBEXEC", Const, 0, ""},
+ {"ELIBMAX", Const, 0, ""},
+ {"ELIBSCN", Const, 0, ""},
+ {"ELNRNG", Const, 0, ""},
+ {"ELOOP", Const, 0, ""},
+ {"EMEDIUMTYPE", Const, 0, ""},
+ {"EMFILE", Const, 0, ""},
+ {"EMLINK", Const, 0, ""},
+ {"EMSGSIZE", Const, 0, ""},
+ {"EMT_TAGOVF", Const, 1, ""},
+ {"EMULTIHOP", Const, 0, ""},
+ {"EMUL_ENABLED", Const, 1, ""},
+ {"EMUL_LINUX", Const, 1, ""},
+ {"EMUL_LINUX32", Const, 1, ""},
+ {"EMUL_MAXID", Const, 1, ""},
+ {"EMUL_NATIVE", Const, 1, ""},
+ {"ENAMETOOLONG", Const, 0, ""},
+ {"ENAVAIL", Const, 0, ""},
+ {"ENDRUNDISC", Const, 1, ""},
+ {"ENEEDAUTH", Const, 0, ""},
+ {"ENETDOWN", Const, 0, ""},
+ {"ENETRESET", Const, 0, ""},
+ {"ENETUNREACH", Const, 0, ""},
+ {"ENFILE", Const, 0, ""},
+ {"ENOANO", Const, 0, ""},
+ {"ENOATTR", Const, 0, ""},
+ {"ENOBUFS", Const, 0, ""},
+ {"ENOCSI", Const, 0, ""},
+ {"ENODATA", Const, 0, ""},
+ {"ENODEV", Const, 0, ""},
+ {"ENOENT", Const, 0, ""},
+ {"ENOEXEC", Const, 0, ""},
+ {"ENOKEY", Const, 0, ""},
+ {"ENOLCK", Const, 0, ""},
+ {"ENOLINK", Const, 0, ""},
+ {"ENOMEDIUM", Const, 0, ""},
+ {"ENOMEM", Const, 0, ""},
+ {"ENOMSG", Const, 0, ""},
+ {"ENONET", Const, 0, ""},
+ {"ENOPKG", Const, 0, ""},
+ {"ENOPOLICY", Const, 0, ""},
+ {"ENOPROTOOPT", Const, 0, ""},
+ {"ENOSPC", Const, 0, ""},
+ {"ENOSR", Const, 0, ""},
+ {"ENOSTR", Const, 0, ""},
+ {"ENOSYS", Const, 0, ""},
+ {"ENOTBLK", Const, 0, ""},
+ {"ENOTCAPABLE", Const, 0, ""},
+ {"ENOTCONN", Const, 0, ""},
+ {"ENOTDIR", Const, 0, ""},
+ {"ENOTEMPTY", Const, 0, ""},
+ {"ENOTNAM", Const, 0, ""},
+ {"ENOTRECOVERABLE", Const, 0, ""},
+ {"ENOTSOCK", Const, 0, ""},
+ {"ENOTSUP", Const, 0, ""},
+ {"ENOTTY", Const, 0, ""},
+ {"ENOTUNIQ", Const, 0, ""},
+ {"ENXIO", Const, 0, ""},
+ {"EN_SW_CTL_INF", Const, 1, ""},
+ {"EN_SW_CTL_PREC", Const, 1, ""},
+ {"EN_SW_CTL_ROUND", Const, 1, ""},
+ {"EN_SW_DATACHAIN", Const, 1, ""},
+ {"EN_SW_DENORM", Const, 1, ""},
+ {"EN_SW_INVOP", Const, 1, ""},
+ {"EN_SW_OVERFLOW", Const, 1, ""},
+ {"EN_SW_PRECLOSS", Const, 1, ""},
+ {"EN_SW_UNDERFLOW", Const, 1, ""},
+ {"EN_SW_ZERODIV", Const, 1, ""},
+ {"EOPNOTSUPP", Const, 0, ""},
+ {"EOVERFLOW", Const, 0, ""},
+ {"EOWNERDEAD", Const, 0, ""},
+ {"EPERM", Const, 0, ""},
+ {"EPFNOSUPPORT", Const, 0, ""},
+ {"EPIPE", Const, 0, ""},
+ {"EPOLLERR", Const, 0, ""},
+ {"EPOLLET", Const, 0, ""},
+ {"EPOLLHUP", Const, 0, ""},
+ {"EPOLLIN", Const, 0, ""},
+ {"EPOLLMSG", Const, 0, ""},
+ {"EPOLLONESHOT", Const, 0, ""},
+ {"EPOLLOUT", Const, 0, ""},
+ {"EPOLLPRI", Const, 0, ""},
+ {"EPOLLRDBAND", Const, 0, ""},
+ {"EPOLLRDHUP", Const, 0, ""},
+ {"EPOLLRDNORM", Const, 0, ""},
+ {"EPOLLWRBAND", Const, 0, ""},
+ {"EPOLLWRNORM", Const, 0, ""},
+ {"EPOLL_CLOEXEC", Const, 0, ""},
+ {"EPOLL_CTL_ADD", Const, 0, ""},
+ {"EPOLL_CTL_DEL", Const, 0, ""},
+ {"EPOLL_CTL_MOD", Const, 0, ""},
+ {"EPOLL_NONBLOCK", Const, 0, ""},
+ {"EPROCLIM", Const, 0, ""},
+ {"EPROCUNAVAIL", Const, 0, ""},
+ {"EPROGMISMATCH", Const, 0, ""},
+ {"EPROGUNAVAIL", Const, 0, ""},
+ {"EPROTO", Const, 0, ""},
+ {"EPROTONOSUPPORT", Const, 0, ""},
+ {"EPROTOTYPE", Const, 0, ""},
+ {"EPWROFF", Const, 0, ""},
+ {"EQFULL", Const, 16, ""},
+ {"ERANGE", Const, 0, ""},
+ {"EREMCHG", Const, 0, ""},
+ {"EREMOTE", Const, 0, ""},
+ {"EREMOTEIO", Const, 0, ""},
+ {"ERESTART", Const, 0, ""},
+ {"ERFKILL", Const, 0, ""},
+ {"EROFS", Const, 0, ""},
+ {"ERPCMISMATCH", Const, 0, ""},
+ {"ERROR_ACCESS_DENIED", Const, 0, ""},
+ {"ERROR_ALREADY_EXISTS", Const, 0, ""},
+ {"ERROR_BROKEN_PIPE", Const, 0, ""},
+ {"ERROR_BUFFER_OVERFLOW", Const, 0, ""},
+ {"ERROR_DIR_NOT_EMPTY", Const, 8, ""},
+ {"ERROR_ENVVAR_NOT_FOUND", Const, 0, ""},
+ {"ERROR_FILE_EXISTS", Const, 0, ""},
+ {"ERROR_FILE_NOT_FOUND", Const, 0, ""},
+ {"ERROR_HANDLE_EOF", Const, 2, ""},
+ {"ERROR_INSUFFICIENT_BUFFER", Const, 0, ""},
+ {"ERROR_IO_PENDING", Const, 0, ""},
+ {"ERROR_MOD_NOT_FOUND", Const, 0, ""},
+ {"ERROR_MORE_DATA", Const, 3, ""},
+ {"ERROR_NETNAME_DELETED", Const, 3, ""},
+ {"ERROR_NOT_FOUND", Const, 1, ""},
+ {"ERROR_NO_MORE_FILES", Const, 0, ""},
+ {"ERROR_OPERATION_ABORTED", Const, 0, ""},
+ {"ERROR_PATH_NOT_FOUND", Const, 0, ""},
+ {"ERROR_PRIVILEGE_NOT_HELD", Const, 4, ""},
+ {"ERROR_PROC_NOT_FOUND", Const, 0, ""},
+ {"ESHLIBVERS", Const, 0, ""},
+ {"ESHUTDOWN", Const, 0, ""},
+ {"ESOCKTNOSUPPORT", Const, 0, ""},
+ {"ESPIPE", Const, 0, ""},
+ {"ESRCH", Const, 0, ""},
+ {"ESRMNT", Const, 0, ""},
+ {"ESTALE", Const, 0, ""},
+ {"ESTRPIPE", Const, 0, ""},
+ {"ETHERCAP_JUMBO_MTU", Const, 1, ""},
+ {"ETHERCAP_VLAN_HWTAGGING", Const, 1, ""},
+ {"ETHERCAP_VLAN_MTU", Const, 1, ""},
+ {"ETHERMIN", Const, 1, ""},
+ {"ETHERMTU", Const, 1, ""},
+ {"ETHERMTU_JUMBO", Const, 1, ""},
+ {"ETHERTYPE_8023", Const, 1, ""},
+ {"ETHERTYPE_AARP", Const, 1, ""},
+ {"ETHERTYPE_ACCTON", Const, 1, ""},
+ {"ETHERTYPE_AEONIC", Const, 1, ""},
+ {"ETHERTYPE_ALPHA", Const, 1, ""},
+ {"ETHERTYPE_AMBER", Const, 1, ""},
+ {"ETHERTYPE_AMOEBA", Const, 1, ""},
+ {"ETHERTYPE_AOE", Const, 1, ""},
+ {"ETHERTYPE_APOLLO", Const, 1, ""},
+ {"ETHERTYPE_APOLLODOMAIN", Const, 1, ""},
+ {"ETHERTYPE_APPLETALK", Const, 1, ""},
+ {"ETHERTYPE_APPLITEK", Const, 1, ""},
+ {"ETHERTYPE_ARGONAUT", Const, 1, ""},
+ {"ETHERTYPE_ARP", Const, 1, ""},
+ {"ETHERTYPE_AT", Const, 1, ""},
+ {"ETHERTYPE_ATALK", Const, 1, ""},
+ {"ETHERTYPE_ATOMIC", Const, 1, ""},
+ {"ETHERTYPE_ATT", Const, 1, ""},
+ {"ETHERTYPE_ATTSTANFORD", Const, 1, ""},
+ {"ETHERTYPE_AUTOPHON", Const, 1, ""},
+ {"ETHERTYPE_AXIS", Const, 1, ""},
+ {"ETHERTYPE_BCLOOP", Const, 1, ""},
+ {"ETHERTYPE_BOFL", Const, 1, ""},
+ {"ETHERTYPE_CABLETRON", Const, 1, ""},
+ {"ETHERTYPE_CHAOS", Const, 1, ""},
+ {"ETHERTYPE_COMDESIGN", Const, 1, ""},
+ {"ETHERTYPE_COMPUGRAPHIC", Const, 1, ""},
+ {"ETHERTYPE_COUNTERPOINT", Const, 1, ""},
+ {"ETHERTYPE_CRONUS", Const, 1, ""},
+ {"ETHERTYPE_CRONUSVLN", Const, 1, ""},
+ {"ETHERTYPE_DCA", Const, 1, ""},
+ {"ETHERTYPE_DDE", Const, 1, ""},
+ {"ETHERTYPE_DEBNI", Const, 1, ""},
+ {"ETHERTYPE_DECAM", Const, 1, ""},
+ {"ETHERTYPE_DECCUST", Const, 1, ""},
+ {"ETHERTYPE_DECDIAG", Const, 1, ""},
+ {"ETHERTYPE_DECDNS", Const, 1, ""},
+ {"ETHERTYPE_DECDTS", Const, 1, ""},
+ {"ETHERTYPE_DECEXPER", Const, 1, ""},
+ {"ETHERTYPE_DECLAST", Const, 1, ""},
+ {"ETHERTYPE_DECLTM", Const, 1, ""},
+ {"ETHERTYPE_DECMUMPS", Const, 1, ""},
+ {"ETHERTYPE_DECNETBIOS", Const, 1, ""},
+ {"ETHERTYPE_DELTACON", Const, 1, ""},
+ {"ETHERTYPE_DIDDLE", Const, 1, ""},
+ {"ETHERTYPE_DLOG1", Const, 1, ""},
+ {"ETHERTYPE_DLOG2", Const, 1, ""},
+ {"ETHERTYPE_DN", Const, 1, ""},
+ {"ETHERTYPE_DOGFIGHT", Const, 1, ""},
+ {"ETHERTYPE_DSMD", Const, 1, ""},
+ {"ETHERTYPE_ECMA", Const, 1, ""},
+ {"ETHERTYPE_ENCRYPT", Const, 1, ""},
+ {"ETHERTYPE_ES", Const, 1, ""},
+ {"ETHERTYPE_EXCELAN", Const, 1, ""},
+ {"ETHERTYPE_EXPERDATA", Const, 1, ""},
+ {"ETHERTYPE_FLIP", Const, 1, ""},
+ {"ETHERTYPE_FLOWCONTROL", Const, 1, ""},
+ {"ETHERTYPE_FRARP", Const, 1, ""},
+ {"ETHERTYPE_GENDYN", Const, 1, ""},
+ {"ETHERTYPE_HAYES", Const, 1, ""},
+ {"ETHERTYPE_HIPPI_FP", Const, 1, ""},
+ {"ETHERTYPE_HITACHI", Const, 1, ""},
+ {"ETHERTYPE_HP", Const, 1, ""},
+ {"ETHERTYPE_IEEEPUP", Const, 1, ""},
+ {"ETHERTYPE_IEEEPUPAT", Const, 1, ""},
+ {"ETHERTYPE_IMLBL", Const, 1, ""},
+ {"ETHERTYPE_IMLBLDIAG", Const, 1, ""},
+ {"ETHERTYPE_IP", Const, 1, ""},
+ {"ETHERTYPE_IPAS", Const, 1, ""},
+ {"ETHERTYPE_IPV6", Const, 1, ""},
+ {"ETHERTYPE_IPX", Const, 1, ""},
+ {"ETHERTYPE_IPXNEW", Const, 1, ""},
+ {"ETHERTYPE_KALPANA", Const, 1, ""},
+ {"ETHERTYPE_LANBRIDGE", Const, 1, ""},
+ {"ETHERTYPE_LANPROBE", Const, 1, ""},
+ {"ETHERTYPE_LAT", Const, 1, ""},
+ {"ETHERTYPE_LBACK", Const, 1, ""},
+ {"ETHERTYPE_LITTLE", Const, 1, ""},
+ {"ETHERTYPE_LLDP", Const, 1, ""},
+ {"ETHERTYPE_LOGICRAFT", Const, 1, ""},
+ {"ETHERTYPE_LOOPBACK", Const, 1, ""},
+ {"ETHERTYPE_MATRA", Const, 1, ""},
+ {"ETHERTYPE_MAX", Const, 1, ""},
+ {"ETHERTYPE_MERIT", Const, 1, ""},
+ {"ETHERTYPE_MICP", Const, 1, ""},
+ {"ETHERTYPE_MOPDL", Const, 1, ""},
+ {"ETHERTYPE_MOPRC", Const, 1, ""},
+ {"ETHERTYPE_MOTOROLA", Const, 1, ""},
+ {"ETHERTYPE_MPLS", Const, 1, ""},
+ {"ETHERTYPE_MPLS_MCAST", Const, 1, ""},
+ {"ETHERTYPE_MUMPS", Const, 1, ""},
+ {"ETHERTYPE_NBPCC", Const, 1, ""},
+ {"ETHERTYPE_NBPCLAIM", Const, 1, ""},
+ {"ETHERTYPE_NBPCLREQ", Const, 1, ""},
+ {"ETHERTYPE_NBPCLRSP", Const, 1, ""},
+ {"ETHERTYPE_NBPCREQ", Const, 1, ""},
+ {"ETHERTYPE_NBPCRSP", Const, 1, ""},
+ {"ETHERTYPE_NBPDG", Const, 1, ""},
+ {"ETHERTYPE_NBPDGB", Const, 1, ""},
+ {"ETHERTYPE_NBPDLTE", Const, 1, ""},
+ {"ETHERTYPE_NBPRAR", Const, 1, ""},
+ {"ETHERTYPE_NBPRAS", Const, 1, ""},
+ {"ETHERTYPE_NBPRST", Const, 1, ""},
+ {"ETHERTYPE_NBPSCD", Const, 1, ""},
+ {"ETHERTYPE_NBPVCD", Const, 1, ""},
+ {"ETHERTYPE_NBS", Const, 1, ""},
+ {"ETHERTYPE_NCD", Const, 1, ""},
+ {"ETHERTYPE_NESTAR", Const, 1, ""},
+ {"ETHERTYPE_NETBEUI", Const, 1, ""},
+ {"ETHERTYPE_NOVELL", Const, 1, ""},
+ {"ETHERTYPE_NS", Const, 1, ""},
+ {"ETHERTYPE_NSAT", Const, 1, ""},
+ {"ETHERTYPE_NSCOMPAT", Const, 1, ""},
+ {"ETHERTYPE_NTRAILER", Const, 1, ""},
+ {"ETHERTYPE_OS9", Const, 1, ""},
+ {"ETHERTYPE_OS9NET", Const, 1, ""},
+ {"ETHERTYPE_PACER", Const, 1, ""},
+ {"ETHERTYPE_PAE", Const, 1, ""},
+ {"ETHERTYPE_PCS", Const, 1, ""},
+ {"ETHERTYPE_PLANNING", Const, 1, ""},
+ {"ETHERTYPE_PPP", Const, 1, ""},
+ {"ETHERTYPE_PPPOE", Const, 1, ""},
+ {"ETHERTYPE_PPPOEDISC", Const, 1, ""},
+ {"ETHERTYPE_PRIMENTS", Const, 1, ""},
+ {"ETHERTYPE_PUP", Const, 1, ""},
+ {"ETHERTYPE_PUPAT", Const, 1, ""},
+ {"ETHERTYPE_QINQ", Const, 1, ""},
+ {"ETHERTYPE_RACAL", Const, 1, ""},
+ {"ETHERTYPE_RATIONAL", Const, 1, ""},
+ {"ETHERTYPE_RAWFR", Const, 1, ""},
+ {"ETHERTYPE_RCL", Const, 1, ""},
+ {"ETHERTYPE_RDP", Const, 1, ""},
+ {"ETHERTYPE_RETIX", Const, 1, ""},
+ {"ETHERTYPE_REVARP", Const, 1, ""},
+ {"ETHERTYPE_SCA", Const, 1, ""},
+ {"ETHERTYPE_SECTRA", Const, 1, ""},
+ {"ETHERTYPE_SECUREDATA", Const, 1, ""},
+ {"ETHERTYPE_SGITW", Const, 1, ""},
+ {"ETHERTYPE_SG_BOUNCE", Const, 1, ""},
+ {"ETHERTYPE_SG_DIAG", Const, 1, ""},
+ {"ETHERTYPE_SG_NETGAMES", Const, 1, ""},
+ {"ETHERTYPE_SG_RESV", Const, 1, ""},
+ {"ETHERTYPE_SIMNET", Const, 1, ""},
+ {"ETHERTYPE_SLOW", Const, 1, ""},
+ {"ETHERTYPE_SLOWPROTOCOLS", Const, 1, ""},
+ {"ETHERTYPE_SNA", Const, 1, ""},
+ {"ETHERTYPE_SNMP", Const, 1, ""},
+ {"ETHERTYPE_SONIX", Const, 1, ""},
+ {"ETHERTYPE_SPIDER", Const, 1, ""},
+ {"ETHERTYPE_SPRITE", Const, 1, ""},
+ {"ETHERTYPE_STP", Const, 1, ""},
+ {"ETHERTYPE_TALARIS", Const, 1, ""},
+ {"ETHERTYPE_TALARISMC", Const, 1, ""},
+ {"ETHERTYPE_TCPCOMP", Const, 1, ""},
+ {"ETHERTYPE_TCPSM", Const, 1, ""},
+ {"ETHERTYPE_TEC", Const, 1, ""},
+ {"ETHERTYPE_TIGAN", Const, 1, ""},
+ {"ETHERTYPE_TRAIL", Const, 1, ""},
+ {"ETHERTYPE_TRANSETHER", Const, 1, ""},
+ {"ETHERTYPE_TYMSHARE", Const, 1, ""},
+ {"ETHERTYPE_UBBST", Const, 1, ""},
+ {"ETHERTYPE_UBDEBUG", Const, 1, ""},
+ {"ETHERTYPE_UBDIAGLOOP", Const, 1, ""},
+ {"ETHERTYPE_UBDL", Const, 1, ""},
+ {"ETHERTYPE_UBNIU", Const, 1, ""},
+ {"ETHERTYPE_UBNMC", Const, 1, ""},
+ {"ETHERTYPE_VALID", Const, 1, ""},
+ {"ETHERTYPE_VARIAN", Const, 1, ""},
+ {"ETHERTYPE_VAXELN", Const, 1, ""},
+ {"ETHERTYPE_VEECO", Const, 1, ""},
+ {"ETHERTYPE_VEXP", Const, 1, ""},
+ {"ETHERTYPE_VGLAB", Const, 1, ""},
+ {"ETHERTYPE_VINES", Const, 1, ""},
+ {"ETHERTYPE_VINESECHO", Const, 1, ""},
+ {"ETHERTYPE_VINESLOOP", Const, 1, ""},
+ {"ETHERTYPE_VITAL", Const, 1, ""},
+ {"ETHERTYPE_VLAN", Const, 1, ""},
+ {"ETHERTYPE_VLTLMAN", Const, 1, ""},
+ {"ETHERTYPE_VPROD", Const, 1, ""},
+ {"ETHERTYPE_VURESERVED", Const, 1, ""},
+ {"ETHERTYPE_WATERLOO", Const, 1, ""},
+ {"ETHERTYPE_WELLFLEET", Const, 1, ""},
+ {"ETHERTYPE_X25", Const, 1, ""},
+ {"ETHERTYPE_X75", Const, 1, ""},
+ {"ETHERTYPE_XNSSM", Const, 1, ""},
+ {"ETHERTYPE_XTP", Const, 1, ""},
+ {"ETHER_ADDR_LEN", Const, 1, ""},
+ {"ETHER_ALIGN", Const, 1, ""},
+ {"ETHER_CRC_LEN", Const, 1, ""},
+ {"ETHER_CRC_POLY_BE", Const, 1, ""},
+ {"ETHER_CRC_POLY_LE", Const, 1, ""},
+ {"ETHER_HDR_LEN", Const, 1, ""},
+ {"ETHER_MAX_DIX_LEN", Const, 1, ""},
+ {"ETHER_MAX_LEN", Const, 1, ""},
+ {"ETHER_MAX_LEN_JUMBO", Const, 1, ""},
+ {"ETHER_MIN_LEN", Const, 1, ""},
+ {"ETHER_PPPOE_ENCAP_LEN", Const, 1, ""},
+ {"ETHER_TYPE_LEN", Const, 1, ""},
+ {"ETHER_VLAN_ENCAP_LEN", Const, 1, ""},
+ {"ETH_P_1588", Const, 0, ""},
+ {"ETH_P_8021Q", Const, 0, ""},
+ {"ETH_P_802_2", Const, 0, ""},
+ {"ETH_P_802_3", Const, 0, ""},
+ {"ETH_P_AARP", Const, 0, ""},
+ {"ETH_P_ALL", Const, 0, ""},
+ {"ETH_P_AOE", Const, 0, ""},
+ {"ETH_P_ARCNET", Const, 0, ""},
+ {"ETH_P_ARP", Const, 0, ""},
+ {"ETH_P_ATALK", Const, 0, ""},
+ {"ETH_P_ATMFATE", Const, 0, ""},
+ {"ETH_P_ATMMPOA", Const, 0, ""},
+ {"ETH_P_AX25", Const, 0, ""},
+ {"ETH_P_BPQ", Const, 0, ""},
+ {"ETH_P_CAIF", Const, 0, ""},
+ {"ETH_P_CAN", Const, 0, ""},
+ {"ETH_P_CONTROL", Const, 0, ""},
+ {"ETH_P_CUST", Const, 0, ""},
+ {"ETH_P_DDCMP", Const, 0, ""},
+ {"ETH_P_DEC", Const, 0, ""},
+ {"ETH_P_DIAG", Const, 0, ""},
+ {"ETH_P_DNA_DL", Const, 0, ""},
+ {"ETH_P_DNA_RC", Const, 0, ""},
+ {"ETH_P_DNA_RT", Const, 0, ""},
+ {"ETH_P_DSA", Const, 0, ""},
+ {"ETH_P_ECONET", Const, 0, ""},
+ {"ETH_P_EDSA", Const, 0, ""},
+ {"ETH_P_FCOE", Const, 0, ""},
+ {"ETH_P_FIP", Const, 0, ""},
+ {"ETH_P_HDLC", Const, 0, ""},
+ {"ETH_P_IEEE802154", Const, 0, ""},
+ {"ETH_P_IEEEPUP", Const, 0, ""},
+ {"ETH_P_IEEEPUPAT", Const, 0, ""},
+ {"ETH_P_IP", Const, 0, ""},
+ {"ETH_P_IPV6", Const, 0, ""},
+ {"ETH_P_IPX", Const, 0, ""},
+ {"ETH_P_IRDA", Const, 0, ""},
+ {"ETH_P_LAT", Const, 0, ""},
+ {"ETH_P_LINK_CTL", Const, 0, ""},
+ {"ETH_P_LOCALTALK", Const, 0, ""},
+ {"ETH_P_LOOP", Const, 0, ""},
+ {"ETH_P_MOBITEX", Const, 0, ""},
+ {"ETH_P_MPLS_MC", Const, 0, ""},
+ {"ETH_P_MPLS_UC", Const, 0, ""},
+ {"ETH_P_PAE", Const, 0, ""},
+ {"ETH_P_PAUSE", Const, 0, ""},
+ {"ETH_P_PHONET", Const, 0, ""},
+ {"ETH_P_PPPTALK", Const, 0, ""},
+ {"ETH_P_PPP_DISC", Const, 0, ""},
+ {"ETH_P_PPP_MP", Const, 0, ""},
+ {"ETH_P_PPP_SES", Const, 0, ""},
+ {"ETH_P_PUP", Const, 0, ""},
+ {"ETH_P_PUPAT", Const, 0, ""},
+ {"ETH_P_RARP", Const, 0, ""},
+ {"ETH_P_SCA", Const, 0, ""},
+ {"ETH_P_SLOW", Const, 0, ""},
+ {"ETH_P_SNAP", Const, 0, ""},
+ {"ETH_P_TEB", Const, 0, ""},
+ {"ETH_P_TIPC", Const, 0, ""},
+ {"ETH_P_TRAILER", Const, 0, ""},
+ {"ETH_P_TR_802_2", Const, 0, ""},
+ {"ETH_P_WAN_PPP", Const, 0, ""},
+ {"ETH_P_WCCP", Const, 0, ""},
+ {"ETH_P_X25", Const, 0, ""},
+ {"ETIME", Const, 0, ""},
+ {"ETIMEDOUT", Const, 0, ""},
+ {"ETOOMANYREFS", Const, 0, ""},
+ {"ETXTBSY", Const, 0, ""},
+ {"EUCLEAN", Const, 0, ""},
+ {"EUNATCH", Const, 0, ""},
+ {"EUSERS", Const, 0, ""},
+ {"EVFILT_AIO", Const, 0, ""},
+ {"EVFILT_FS", Const, 0, ""},
+ {"EVFILT_LIO", Const, 0, ""},
+ {"EVFILT_MACHPORT", Const, 0, ""},
+ {"EVFILT_PROC", Const, 0, ""},
+ {"EVFILT_READ", Const, 0, ""},
+ {"EVFILT_SIGNAL", Const, 0, ""},
+ {"EVFILT_SYSCOUNT", Const, 0, ""},
+ {"EVFILT_THREADMARKER", Const, 0, ""},
+ {"EVFILT_TIMER", Const, 0, ""},
+ {"EVFILT_USER", Const, 0, ""},
+ {"EVFILT_VM", Const, 0, ""},
+ {"EVFILT_VNODE", Const, 0, ""},
+ {"EVFILT_WRITE", Const, 0, ""},
+ {"EV_ADD", Const, 0, ""},
+ {"EV_CLEAR", Const, 0, ""},
+ {"EV_DELETE", Const, 0, ""},
+ {"EV_DISABLE", Const, 0, ""},
+ {"EV_DISPATCH", Const, 0, ""},
+ {"EV_DROP", Const, 3, ""},
+ {"EV_ENABLE", Const, 0, ""},
+ {"EV_EOF", Const, 0, ""},
+ {"EV_ERROR", Const, 0, ""},
+ {"EV_FLAG0", Const, 0, ""},
+ {"EV_FLAG1", Const, 0, ""},
+ {"EV_ONESHOT", Const, 0, ""},
+ {"EV_OOBAND", Const, 0, ""},
+ {"EV_POLL", Const, 0, ""},
+ {"EV_RECEIPT", Const, 0, ""},
+ {"EV_SYSFLAGS", Const, 0, ""},
+ {"EWINDOWS", Const, 0, ""},
+ {"EWOULDBLOCK", Const, 0, ""},
+ {"EXDEV", Const, 0, ""},
+ {"EXFULL", Const, 0, ""},
+ {"EXTA", Const, 0, ""},
+ {"EXTB", Const, 0, ""},
+ {"EXTPROC", Const, 0, ""},
+ {"Environ", Func, 0, "func() []string"},
+ {"EpollCreate", Func, 0, "func(size int) (fd int, err error)"},
+ {"EpollCreate1", Func, 0, "func(flag int) (fd int, err error)"},
+ {"EpollCtl", Func, 0, "func(epfd int, op int, fd int, event *EpollEvent) (err error)"},
+ {"EpollEvent", Type, 0, ""},
+ {"EpollEvent.Events", Field, 0, ""},
+ {"EpollEvent.Fd", Field, 0, ""},
+ {"EpollEvent.Pad", Field, 0, ""},
+ {"EpollEvent.PadFd", Field, 0, ""},
+ {"EpollWait", Func, 0, "func(epfd int, events []EpollEvent, msec int) (n int, err error)"},
+ {"Errno", Type, 0, ""},
+ {"EscapeArg", Func, 0, ""},
+ {"Exchangedata", Func, 0, ""},
+ {"Exec", Func, 0, "func(argv0 string, argv []string, envv []string) (err error)"},
+ {"Exit", Func, 0, "func(code int)"},
+ {"ExitProcess", Func, 0, ""},
+ {"FD_CLOEXEC", Const, 0, ""},
+ {"FD_SETSIZE", Const, 0, ""},
+ {"FILE_ACTION_ADDED", Const, 0, ""},
+ {"FILE_ACTION_MODIFIED", Const, 0, ""},
+ {"FILE_ACTION_REMOVED", Const, 0, ""},
+ {"FILE_ACTION_RENAMED_NEW_NAME", Const, 0, ""},
+ {"FILE_ACTION_RENAMED_OLD_NAME", Const, 0, ""},
+ {"FILE_APPEND_DATA", Const, 0, ""},
+ {"FILE_ATTRIBUTE_ARCHIVE", Const, 0, ""},
+ {"FILE_ATTRIBUTE_DIRECTORY", Const, 0, ""},
+ {"FILE_ATTRIBUTE_HIDDEN", Const, 0, ""},
+ {"FILE_ATTRIBUTE_NORMAL", Const, 0, ""},
+ {"FILE_ATTRIBUTE_READONLY", Const, 0, ""},
+ {"FILE_ATTRIBUTE_REPARSE_POINT", Const, 4, ""},
+ {"FILE_ATTRIBUTE_SYSTEM", Const, 0, ""},
+ {"FILE_BEGIN", Const, 0, ""},
+ {"FILE_CURRENT", Const, 0, ""},
+ {"FILE_END", Const, 0, ""},
+ {"FILE_FLAG_BACKUP_SEMANTICS", Const, 0, ""},
+ {"FILE_FLAG_OPEN_REPARSE_POINT", Const, 4, ""},
+ {"FILE_FLAG_OVERLAPPED", Const, 0, ""},
+ {"FILE_LIST_DIRECTORY", Const, 0, ""},
+ {"FILE_MAP_COPY", Const, 0, ""},
+ {"FILE_MAP_EXECUTE", Const, 0, ""},
+ {"FILE_MAP_READ", Const, 0, ""},
+ {"FILE_MAP_WRITE", Const, 0, ""},
+ {"FILE_NOTIFY_CHANGE_ATTRIBUTES", Const, 0, ""},
+ {"FILE_NOTIFY_CHANGE_CREATION", Const, 0, ""},
+ {"FILE_NOTIFY_CHANGE_DIR_NAME", Const, 0, ""},
+ {"FILE_NOTIFY_CHANGE_FILE_NAME", Const, 0, ""},
+ {"FILE_NOTIFY_CHANGE_LAST_ACCESS", Const, 0, ""},
+ {"FILE_NOTIFY_CHANGE_LAST_WRITE", Const, 0, ""},
+ {"FILE_NOTIFY_CHANGE_SIZE", Const, 0, ""},
+ {"FILE_SHARE_DELETE", Const, 0, ""},
+ {"FILE_SHARE_READ", Const, 0, ""},
+ {"FILE_SHARE_WRITE", Const, 0, ""},
+ {"FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", Const, 2, ""},
+ {"FILE_SKIP_SET_EVENT_ON_HANDLE", Const, 2, ""},
+ {"FILE_TYPE_CHAR", Const, 0, ""},
+ {"FILE_TYPE_DISK", Const, 0, ""},
+ {"FILE_TYPE_PIPE", Const, 0, ""},
+ {"FILE_TYPE_REMOTE", Const, 0, ""},
+ {"FILE_TYPE_UNKNOWN", Const, 0, ""},
+ {"FILE_WRITE_ATTRIBUTES", Const, 0, ""},
+ {"FLUSHO", Const, 0, ""},
+ {"FORMAT_MESSAGE_ALLOCATE_BUFFER", Const, 0, ""},
+ {"FORMAT_MESSAGE_ARGUMENT_ARRAY", Const, 0, ""},
+ {"FORMAT_MESSAGE_FROM_HMODULE", Const, 0, ""},
+ {"FORMAT_MESSAGE_FROM_STRING", Const, 0, ""},
+ {"FORMAT_MESSAGE_FROM_SYSTEM", Const, 0, ""},
+ {"FORMAT_MESSAGE_IGNORE_INSERTS", Const, 0, ""},
+ {"FORMAT_MESSAGE_MAX_WIDTH_MASK", Const, 0, ""},
+ {"FSCTL_GET_REPARSE_POINT", Const, 4, ""},
+ {"F_ADDFILESIGS", Const, 0, ""},
+ {"F_ADDSIGS", Const, 0, ""},
+ {"F_ALLOCATEALL", Const, 0, ""},
+ {"F_ALLOCATECONTIG", Const, 0, ""},
+ {"F_CANCEL", Const, 0, ""},
+ {"F_CHKCLEAN", Const, 0, ""},
+ {"F_CLOSEM", Const, 1, ""},
+ {"F_DUP2FD", Const, 0, ""},
+ {"F_DUP2FD_CLOEXEC", Const, 1, ""},
+ {"F_DUPFD", Const, 0, ""},
+ {"F_DUPFD_CLOEXEC", Const, 0, ""},
+ {"F_EXLCK", Const, 0, ""},
+ {"F_FINDSIGS", Const, 16, ""},
+ {"F_FLUSH_DATA", Const, 0, ""},
+ {"F_FREEZE_FS", Const, 0, ""},
+ {"F_FSCTL", Const, 1, ""},
+ {"F_FSDIRMASK", Const, 1, ""},
+ {"F_FSIN", Const, 1, ""},
+ {"F_FSINOUT", Const, 1, ""},
+ {"F_FSOUT", Const, 1, ""},
+ {"F_FSPRIV", Const, 1, ""},
+ {"F_FSVOID", Const, 1, ""},
+ {"F_FULLFSYNC", Const, 0, ""},
+ {"F_GETCODEDIR", Const, 16, ""},
+ {"F_GETFD", Const, 0, ""},
+ {"F_GETFL", Const, 0, ""},
+ {"F_GETLEASE", Const, 0, ""},
+ {"F_GETLK", Const, 0, ""},
+ {"F_GETLK64", Const, 0, ""},
+ {"F_GETLKPID", Const, 0, ""},
+ {"F_GETNOSIGPIPE", Const, 0, ""},
+ {"F_GETOWN", Const, 0, ""},
+ {"F_GETOWN_EX", Const, 0, ""},
+ {"F_GETPATH", Const, 0, ""},
+ {"F_GETPATH_MTMINFO", Const, 0, ""},
+ {"F_GETPIPE_SZ", Const, 0, ""},
+ {"F_GETPROTECTIONCLASS", Const, 0, ""},
+ {"F_GETPROTECTIONLEVEL", Const, 16, ""},
+ {"F_GETSIG", Const, 0, ""},
+ {"F_GLOBAL_NOCACHE", Const, 0, ""},
+ {"F_LOCK", Const, 0, ""},
+ {"F_LOG2PHYS", Const, 0, ""},
+ {"F_LOG2PHYS_EXT", Const, 0, ""},
+ {"F_MARKDEPENDENCY", Const, 0, ""},
+ {"F_MAXFD", Const, 1, ""},
+ {"F_NOCACHE", Const, 0, ""},
+ {"F_NODIRECT", Const, 0, ""},
+ {"F_NOTIFY", Const, 0, ""},
+ {"F_OGETLK", Const, 0, ""},
+ {"F_OK", Const, 0, ""},
+ {"F_OSETLK", Const, 0, ""},
+ {"F_OSETLKW", Const, 0, ""},
+ {"F_PARAM_MASK", Const, 1, ""},
+ {"F_PARAM_MAX", Const, 1, ""},
+ {"F_PATHPKG_CHECK", Const, 0, ""},
+ {"F_PEOFPOSMODE", Const, 0, ""},
+ {"F_PREALLOCATE", Const, 0, ""},
+ {"F_RDADVISE", Const, 0, ""},
+ {"F_RDAHEAD", Const, 0, ""},
+ {"F_RDLCK", Const, 0, ""},
+ {"F_READAHEAD", Const, 0, ""},
+ {"F_READBOOTSTRAP", Const, 0, ""},
+ {"F_SETBACKINGSTORE", Const, 0, ""},
+ {"F_SETFD", Const, 0, ""},
+ {"F_SETFL", Const, 0, ""},
+ {"F_SETLEASE", Const, 0, ""},
+ {"F_SETLK", Const, 0, ""},
+ {"F_SETLK64", Const, 0, ""},
+ {"F_SETLKW", Const, 0, ""},
+ {"F_SETLKW64", Const, 0, ""},
+ {"F_SETLKWTIMEOUT", Const, 16, ""},
+ {"F_SETLK_REMOTE", Const, 0, ""},
+ {"F_SETNOSIGPIPE", Const, 0, ""},
+ {"F_SETOWN", Const, 0, ""},
+ {"F_SETOWN_EX", Const, 0, ""},
+ {"F_SETPIPE_SZ", Const, 0, ""},
+ {"F_SETPROTECTIONCLASS", Const, 0, ""},
+ {"F_SETSIG", Const, 0, ""},
+ {"F_SETSIZE", Const, 0, ""},
+ {"F_SHLCK", Const, 0, ""},
+ {"F_SINGLE_WRITER", Const, 16, ""},
+ {"F_TEST", Const, 0, ""},
+ {"F_THAW_FS", Const, 0, ""},
+ {"F_TLOCK", Const, 0, ""},
+ {"F_TRANSCODEKEY", Const, 16, ""},
+ {"F_ULOCK", Const, 0, ""},
+ {"F_UNLCK", Const, 0, ""},
+ {"F_UNLCKSYS", Const, 0, ""},
+ {"F_VOLPOSMODE", Const, 0, ""},
+ {"F_WRITEBOOTSTRAP", Const, 0, ""},
+ {"F_WRLCK", Const, 0, ""},
+ {"Faccessat", Func, 0, "func(dirfd int, path string, mode uint32, flags int) (err error)"},
+ {"Fallocate", Func, 0, "func(fd int, mode uint32, off int64, len int64) (err error)"},
+ {"Fbootstraptransfer_t", Type, 0, ""},
+ {"Fbootstraptransfer_t.Buffer", Field, 0, ""},
+ {"Fbootstraptransfer_t.Length", Field, 0, ""},
+ {"Fbootstraptransfer_t.Offset", Field, 0, ""},
+ {"Fchdir", Func, 0, "func(fd int) (err error)"},
+ {"Fchflags", Func, 0, ""},
+ {"Fchmod", Func, 0, "func(fd int, mode uint32) (err error)"},
+ {"Fchmodat", Func, 0, "func(dirfd int, path string, mode uint32, flags int) error"},
+ {"Fchown", Func, 0, "func(fd int, uid int, gid int) (err error)"},
+ {"Fchownat", Func, 0, "func(dirfd int, path string, uid int, gid int, flags int) (err error)"},
+ {"FcntlFlock", Func, 3, "func(fd uintptr, cmd int, lk *Flock_t) error"},
+ {"FdSet", Type, 0, ""},
+ {"FdSet.Bits", Field, 0, ""},
+ {"FdSet.X__fds_bits", Field, 0, ""},
+ {"Fdatasync", Func, 0, "func(fd int) (err error)"},
+ {"FileNotifyInformation", Type, 0, ""},
+ {"FileNotifyInformation.Action", Field, 0, ""},
+ {"FileNotifyInformation.FileName", Field, 0, ""},
+ {"FileNotifyInformation.FileNameLength", Field, 0, ""},
+ {"FileNotifyInformation.NextEntryOffset", Field, 0, ""},
+ {"Filetime", Type, 0, ""},
+ {"Filetime.HighDateTime", Field, 0, ""},
+ {"Filetime.LowDateTime", Field, 0, ""},
+ {"FindClose", Func, 0, ""},
+ {"FindFirstFile", Func, 0, ""},
+ {"FindNextFile", Func, 0, ""},
+ {"Flock", Func, 0, "func(fd int, how int) (err error)"},
+ {"Flock_t", Type, 0, ""},
+ {"Flock_t.Len", Field, 0, ""},
+ {"Flock_t.Pad_cgo_0", Field, 0, ""},
+ {"Flock_t.Pad_cgo_1", Field, 3, ""},
+ {"Flock_t.Pid", Field, 0, ""},
+ {"Flock_t.Start", Field, 0, ""},
+ {"Flock_t.Sysid", Field, 0, ""},
+ {"Flock_t.Type", Field, 0, ""},
+ {"Flock_t.Whence", Field, 0, ""},
+ {"FlushBpf", Func, 0, ""},
+ {"FlushFileBuffers", Func, 0, ""},
+ {"FlushViewOfFile", Func, 0, ""},
+ {"ForkExec", Func, 0, "func(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)"},
+ {"ForkLock", Var, 0, ""},
+ {"FormatMessage", Func, 0, ""},
+ {"Fpathconf", Func, 0, ""},
+ {"FreeAddrInfoW", Func, 1, ""},
+ {"FreeEnvironmentStrings", Func, 0, ""},
+ {"FreeLibrary", Func, 0, ""},
+ {"Fsid", Type, 0, ""},
+ {"Fsid.Val", Field, 0, ""},
+ {"Fsid.X__fsid_val", Field, 2, ""},
+ {"Fsid.X__val", Field, 0, ""},
+ {"Fstat", Func, 0, "func(fd int, stat *Stat_t) (err error)"},
+ {"Fstatat", Func, 12, ""},
+ {"Fstatfs", Func, 0, "func(fd int, buf *Statfs_t) (err error)"},
+ {"Fstore_t", Type, 0, ""},
+ {"Fstore_t.Bytesalloc", Field, 0, ""},
+ {"Fstore_t.Flags", Field, 0, ""},
+ {"Fstore_t.Length", Field, 0, ""},
+ {"Fstore_t.Offset", Field, 0, ""},
+ {"Fstore_t.Posmode", Field, 0, ""},
+ {"Fsync", Func, 0, "func(fd int) (err error)"},
+ {"Ftruncate", Func, 0, "func(fd int, length int64) (err error)"},
+ {"FullPath", Func, 4, ""},
+ {"Futimes", Func, 0, "func(fd int, tv []Timeval) (err error)"},
+ {"Futimesat", Func, 0, "func(dirfd int, path string, tv []Timeval) (err error)"},
+ {"GENERIC_ALL", Const, 0, ""},
+ {"GENERIC_EXECUTE", Const, 0, ""},
+ {"GENERIC_READ", Const, 0, ""},
+ {"GENERIC_WRITE", Const, 0, ""},
+ {"GUID", Type, 1, ""},
+ {"GUID.Data1", Field, 1, ""},
+ {"GUID.Data2", Field, 1, ""},
+ {"GUID.Data3", Field, 1, ""},
+ {"GUID.Data4", Field, 1, ""},
+ {"GetAcceptExSockaddrs", Func, 0, ""},
+ {"GetAdaptersInfo", Func, 0, ""},
+ {"GetAddrInfoW", Func, 1, ""},
+ {"GetCommandLine", Func, 0, ""},
+ {"GetComputerName", Func, 0, ""},
+ {"GetConsoleMode", Func, 1, ""},
+ {"GetCurrentDirectory", Func, 0, ""},
+ {"GetCurrentProcess", Func, 0, ""},
+ {"GetEnvironmentStrings", Func, 0, ""},
+ {"GetEnvironmentVariable", Func, 0, ""},
+ {"GetExitCodeProcess", Func, 0, ""},
+ {"GetFileAttributes", Func, 0, ""},
+ {"GetFileAttributesEx", Func, 0, ""},
+ {"GetFileExInfoStandard", Const, 0, ""},
+ {"GetFileExMaxInfoLevel", Const, 0, ""},
+ {"GetFileInformationByHandle", Func, 0, ""},
+ {"GetFileType", Func, 0, ""},
+ {"GetFullPathName", Func, 0, ""},
+ {"GetHostByName", Func, 0, ""},
+ {"GetIfEntry", Func, 0, ""},
+ {"GetLastError", Func, 0, ""},
+ {"GetLengthSid", Func, 0, ""},
+ {"GetLongPathName", Func, 0, ""},
+ {"GetProcAddress", Func, 0, ""},
+ {"GetProcessTimes", Func, 0, ""},
+ {"GetProtoByName", Func, 0, ""},
+ {"GetQueuedCompletionStatus", Func, 0, ""},
+ {"GetServByName", Func, 0, ""},
+ {"GetShortPathName", Func, 0, ""},
+ {"GetStartupInfo", Func, 0, ""},
+ {"GetStdHandle", Func, 0, ""},
+ {"GetSystemTimeAsFileTime", Func, 0, ""},
+ {"GetTempPath", Func, 0, ""},
+ {"GetTimeZoneInformation", Func, 0, ""},
+ {"GetTokenInformation", Func, 0, ""},
+ {"GetUserNameEx", Func, 0, ""},
+ {"GetUserProfileDirectory", Func, 0, ""},
+ {"GetVersion", Func, 0, ""},
+ {"Getcwd", Func, 0, "func(buf []byte) (n int, err error)"},
+ {"Getdents", Func, 0, "func(fd int, buf []byte) (n int, err error)"},
+ {"Getdirentries", Func, 0, ""},
+ {"Getdtablesize", Func, 0, ""},
+ {"Getegid", Func, 0, "func() (egid int)"},
+ {"Getenv", Func, 0, "func(key string) (value string, found bool)"},
+ {"Geteuid", Func, 0, "func() (euid int)"},
+ {"Getfsstat", Func, 0, ""},
+ {"Getgid", Func, 0, "func() (gid int)"},
+ {"Getgroups", Func, 0, "func() (gids []int, err error)"},
+ {"Getpagesize", Func, 0, "func() int"},
+ {"Getpeername", Func, 0, "func(fd int) (sa Sockaddr, err error)"},
+ {"Getpgid", Func, 0, "func(pid int) (pgid int, err error)"},
+ {"Getpgrp", Func, 0, "func() (pid int)"},
+ {"Getpid", Func, 0, "func() (pid int)"},
+ {"Getppid", Func, 0, "func() (ppid int)"},
+ {"Getpriority", Func, 0, "func(which int, who int) (prio int, err error)"},
+ {"Getrlimit", Func, 0, "func(resource int, rlim *Rlimit) (err error)"},
+ {"Getrusage", Func, 0, "func(who int, rusage *Rusage) (err error)"},
+ {"Getsid", Func, 0, ""},
+ {"Getsockname", Func, 0, "func(fd int) (sa Sockaddr, err error)"},
+ {"Getsockopt", Func, 1, ""},
+ {"GetsockoptByte", Func, 0, ""},
+ {"GetsockoptICMPv6Filter", Func, 2, "func(fd int, level int, opt int) (*ICMPv6Filter, error)"},
+ {"GetsockoptIPMreq", Func, 0, "func(fd int, level int, opt int) (*IPMreq, error)"},
+ {"GetsockoptIPMreqn", Func, 0, "func(fd int, level int, opt int) (*IPMreqn, error)"},
+ {"GetsockoptIPv6MTUInfo", Func, 2, "func(fd int, level int, opt int) (*IPv6MTUInfo, error)"},
+ {"GetsockoptIPv6Mreq", Func, 0, "func(fd int, level int, opt int) (*IPv6Mreq, error)"},
+ {"GetsockoptInet4Addr", Func, 0, "func(fd int, level int, opt int) (value [4]byte, err error)"},
+ {"GetsockoptInt", Func, 0, "func(fd int, level int, opt int) (value int, err error)"},
+ {"GetsockoptUcred", Func, 1, "func(fd int, level int, opt int) (*Ucred, error)"},
+ {"Gettid", Func, 0, "func() (tid int)"},
+ {"Gettimeofday", Func, 0, "func(tv *Timeval) (err error)"},
+ {"Getuid", Func, 0, "func() (uid int)"},
+ {"Getwd", Func, 0, "func() (wd string, err error)"},
+ {"Getxattr", Func, 1, "func(path string, attr string, dest []byte) (sz int, err error)"},
+ {"HANDLE_FLAG_INHERIT", Const, 0, ""},
+ {"HKEY_CLASSES_ROOT", Const, 0, ""},
+ {"HKEY_CURRENT_CONFIG", Const, 0, ""},
+ {"HKEY_CURRENT_USER", Const, 0, ""},
+ {"HKEY_DYN_DATA", Const, 0, ""},
+ {"HKEY_LOCAL_MACHINE", Const, 0, ""},
+ {"HKEY_PERFORMANCE_DATA", Const, 0, ""},
+ {"HKEY_USERS", Const, 0, ""},
+ {"HUPCL", Const, 0, ""},
+ {"Handle", Type, 0, ""},
+ {"Hostent", Type, 0, ""},
+ {"Hostent.AddrList", Field, 0, ""},
+ {"Hostent.AddrType", Field, 0, ""},
+ {"Hostent.Aliases", Field, 0, ""},
+ {"Hostent.Length", Field, 0, ""},
+ {"Hostent.Name", Field, 0, ""},
+ {"ICANON", Const, 0, ""},
+ {"ICMP6_FILTER", Const, 2, ""},
+ {"ICMPV6_FILTER", Const, 2, ""},
+ {"ICMPv6Filter", Type, 2, ""},
+ {"ICMPv6Filter.Data", Field, 2, ""},
+ {"ICMPv6Filter.Filt", Field, 2, ""},
+ {"ICRNL", Const, 0, ""},
+ {"IEXTEN", Const, 0, ""},
+ {"IFAN_ARRIVAL", Const, 1, ""},
+ {"IFAN_DEPARTURE", Const, 1, ""},
+ {"IFA_ADDRESS", Const, 0, ""},
+ {"IFA_ANYCAST", Const, 0, ""},
+ {"IFA_BROADCAST", Const, 0, ""},
+ {"IFA_CACHEINFO", Const, 0, ""},
+ {"IFA_F_DADFAILED", Const, 0, ""},
+ {"IFA_F_DEPRECATED", Const, 0, ""},
+ {"IFA_F_HOMEADDRESS", Const, 0, ""},
+ {"IFA_F_NODAD", Const, 0, ""},
+ {"IFA_F_OPTIMISTIC", Const, 0, ""},
+ {"IFA_F_PERMANENT", Const, 0, ""},
+ {"IFA_F_SECONDARY", Const, 0, ""},
+ {"IFA_F_TEMPORARY", Const, 0, ""},
+ {"IFA_F_TENTATIVE", Const, 0, ""},
+ {"IFA_LABEL", Const, 0, ""},
+ {"IFA_LOCAL", Const, 0, ""},
+ {"IFA_MAX", Const, 0, ""},
+ {"IFA_MULTICAST", Const, 0, ""},
+ {"IFA_ROUTE", Const, 1, ""},
+ {"IFA_UNSPEC", Const, 0, ""},
+ {"IFF_ALLMULTI", Const, 0, ""},
+ {"IFF_ALTPHYS", Const, 0, ""},
+ {"IFF_AUTOMEDIA", Const, 0, ""},
+ {"IFF_BROADCAST", Const, 0, ""},
+ {"IFF_CANTCHANGE", Const, 0, ""},
+ {"IFF_CANTCONFIG", Const, 1, ""},
+ {"IFF_DEBUG", Const, 0, ""},
+ {"IFF_DRV_OACTIVE", Const, 0, ""},
+ {"IFF_DRV_RUNNING", Const, 0, ""},
+ {"IFF_DYING", Const, 0, ""},
+ {"IFF_DYNAMIC", Const, 0, ""},
+ {"IFF_LINK0", Const, 0, ""},
+ {"IFF_LINK1", Const, 0, ""},
+ {"IFF_LINK2", Const, 0, ""},
+ {"IFF_LOOPBACK", Const, 0, ""},
+ {"IFF_MASTER", Const, 0, ""},
+ {"IFF_MONITOR", Const, 0, ""},
+ {"IFF_MULTICAST", Const, 0, ""},
+ {"IFF_NOARP", Const, 0, ""},
+ {"IFF_NOTRAILERS", Const, 0, ""},
+ {"IFF_NO_PI", Const, 0, ""},
+ {"IFF_OACTIVE", Const, 0, ""},
+ {"IFF_ONE_QUEUE", Const, 0, ""},
+ {"IFF_POINTOPOINT", Const, 0, ""},
+ {"IFF_POINTTOPOINT", Const, 0, ""},
+ {"IFF_PORTSEL", Const, 0, ""},
+ {"IFF_PPROMISC", Const, 0, ""},
+ {"IFF_PROMISC", Const, 0, ""},
+ {"IFF_RENAMING", Const, 0, ""},
+ {"IFF_RUNNING", Const, 0, ""},
+ {"IFF_SIMPLEX", Const, 0, ""},
+ {"IFF_SLAVE", Const, 0, ""},
+ {"IFF_SMART", Const, 0, ""},
+ {"IFF_STATICARP", Const, 0, ""},
+ {"IFF_TAP", Const, 0, ""},
+ {"IFF_TUN", Const, 0, ""},
+ {"IFF_TUN_EXCL", Const, 0, ""},
+ {"IFF_UP", Const, 0, ""},
+ {"IFF_VNET_HDR", Const, 0, ""},
+ {"IFLA_ADDRESS", Const, 0, ""},
+ {"IFLA_BROADCAST", Const, 0, ""},
+ {"IFLA_COST", Const, 0, ""},
+ {"IFLA_IFALIAS", Const, 0, ""},
+ {"IFLA_IFNAME", Const, 0, ""},
+ {"IFLA_LINK", Const, 0, ""},
+ {"IFLA_LINKINFO", Const, 0, ""},
+ {"IFLA_LINKMODE", Const, 0, ""},
+ {"IFLA_MAP", Const, 0, ""},
+ {"IFLA_MASTER", Const, 0, ""},
+ {"IFLA_MAX", Const, 0, ""},
+ {"IFLA_MTU", Const, 0, ""},
+ {"IFLA_NET_NS_PID", Const, 0, ""},
+ {"IFLA_OPERSTATE", Const, 0, ""},
+ {"IFLA_PRIORITY", Const, 0, ""},
+ {"IFLA_PROTINFO", Const, 0, ""},
+ {"IFLA_QDISC", Const, 0, ""},
+ {"IFLA_STATS", Const, 0, ""},
+ {"IFLA_TXQLEN", Const, 0, ""},
+ {"IFLA_UNSPEC", Const, 0, ""},
+ {"IFLA_WEIGHT", Const, 0, ""},
+ {"IFLA_WIRELESS", Const, 0, ""},
+ {"IFNAMSIZ", Const, 0, ""},
+ {"IFT_1822", Const, 0, ""},
+ {"IFT_A12MPPSWITCH", Const, 0, ""},
+ {"IFT_AAL2", Const, 0, ""},
+ {"IFT_AAL5", Const, 0, ""},
+ {"IFT_ADSL", Const, 0, ""},
+ {"IFT_AFLANE8023", Const, 0, ""},
+ {"IFT_AFLANE8025", Const, 0, ""},
+ {"IFT_ARAP", Const, 0, ""},
+ {"IFT_ARCNET", Const, 0, ""},
+ {"IFT_ARCNETPLUS", Const, 0, ""},
+ {"IFT_ASYNC", Const, 0, ""},
+ {"IFT_ATM", Const, 0, ""},
+ {"IFT_ATMDXI", Const, 0, ""},
+ {"IFT_ATMFUNI", Const, 0, ""},
+ {"IFT_ATMIMA", Const, 0, ""},
+ {"IFT_ATMLOGICAL", Const, 0, ""},
+ {"IFT_ATMRADIO", Const, 0, ""},
+ {"IFT_ATMSUBINTERFACE", Const, 0, ""},
+ {"IFT_ATMVCIENDPT", Const, 0, ""},
+ {"IFT_ATMVIRTUAL", Const, 0, ""},
+ {"IFT_BGPPOLICYACCOUNTING", Const, 0, ""},
+ {"IFT_BLUETOOTH", Const, 1, ""},
+ {"IFT_BRIDGE", Const, 0, ""},
+ {"IFT_BSC", Const, 0, ""},
+ {"IFT_CARP", Const, 0, ""},
+ {"IFT_CCTEMUL", Const, 0, ""},
+ {"IFT_CELLULAR", Const, 0, ""},
+ {"IFT_CEPT", Const, 0, ""},
+ {"IFT_CES", Const, 0, ""},
+ {"IFT_CHANNEL", Const, 0, ""},
+ {"IFT_CNR", Const, 0, ""},
+ {"IFT_COFFEE", Const, 0, ""},
+ {"IFT_COMPOSITELINK", Const, 0, ""},
+ {"IFT_DCN", Const, 0, ""},
+ {"IFT_DIGITALPOWERLINE", Const, 0, ""},
+ {"IFT_DIGITALWRAPPEROVERHEADCHANNEL", Const, 0, ""},
+ {"IFT_DLSW", Const, 0, ""},
+ {"IFT_DOCSCABLEDOWNSTREAM", Const, 0, ""},
+ {"IFT_DOCSCABLEMACLAYER", Const, 0, ""},
+ {"IFT_DOCSCABLEUPSTREAM", Const, 0, ""},
+ {"IFT_DOCSCABLEUPSTREAMCHANNEL", Const, 1, ""},
+ {"IFT_DS0", Const, 0, ""},
+ {"IFT_DS0BUNDLE", Const, 0, ""},
+ {"IFT_DS1FDL", Const, 0, ""},
+ {"IFT_DS3", Const, 0, ""},
+ {"IFT_DTM", Const, 0, ""},
+ {"IFT_DUMMY", Const, 1, ""},
+ {"IFT_DVBASILN", Const, 0, ""},
+ {"IFT_DVBASIOUT", Const, 0, ""},
+ {"IFT_DVBRCCDOWNSTREAM", Const, 0, ""},
+ {"IFT_DVBRCCMACLAYER", Const, 0, ""},
+ {"IFT_DVBRCCUPSTREAM", Const, 0, ""},
+ {"IFT_ECONET", Const, 1, ""},
+ {"IFT_ENC", Const, 0, ""},
+ {"IFT_EON", Const, 0, ""},
+ {"IFT_EPLRS", Const, 0, ""},
+ {"IFT_ESCON", Const, 0, ""},
+ {"IFT_ETHER", Const, 0, ""},
+ {"IFT_FAITH", Const, 0, ""},
+ {"IFT_FAST", Const, 0, ""},
+ {"IFT_FASTETHER", Const, 0, ""},
+ {"IFT_FASTETHERFX", Const, 0, ""},
+ {"IFT_FDDI", Const, 0, ""},
+ {"IFT_FIBRECHANNEL", Const, 0, ""},
+ {"IFT_FRAMERELAYINTERCONNECT", Const, 0, ""},
+ {"IFT_FRAMERELAYMPI", Const, 0, ""},
+ {"IFT_FRDLCIENDPT", Const, 0, ""},
+ {"IFT_FRELAY", Const, 0, ""},
+ {"IFT_FRELAYDCE", Const, 0, ""},
+ {"IFT_FRF16MFRBUNDLE", Const, 0, ""},
+ {"IFT_FRFORWARD", Const, 0, ""},
+ {"IFT_G703AT2MB", Const, 0, ""},
+ {"IFT_G703AT64K", Const, 0, ""},
+ {"IFT_GIF", Const, 0, ""},
+ {"IFT_GIGABITETHERNET", Const, 0, ""},
+ {"IFT_GR303IDT", Const, 0, ""},
+ {"IFT_GR303RDT", Const, 0, ""},
+ {"IFT_H323GATEKEEPER", Const, 0, ""},
+ {"IFT_H323PROXY", Const, 0, ""},
+ {"IFT_HDH1822", Const, 0, ""},
+ {"IFT_HDLC", Const, 0, ""},
+ {"IFT_HDSL2", Const, 0, ""},
+ {"IFT_HIPERLAN2", Const, 0, ""},
+ {"IFT_HIPPI", Const, 0, ""},
+ {"IFT_HIPPIINTERFACE", Const, 0, ""},
+ {"IFT_HOSTPAD", Const, 0, ""},
+ {"IFT_HSSI", Const, 0, ""},
+ {"IFT_HY", Const, 0, ""},
+ {"IFT_IBM370PARCHAN", Const, 0, ""},
+ {"IFT_IDSL", Const, 0, ""},
+ {"IFT_IEEE1394", Const, 0, ""},
+ {"IFT_IEEE80211", Const, 0, ""},
+ {"IFT_IEEE80212", Const, 0, ""},
+ {"IFT_IEEE8023ADLAG", Const, 0, ""},
+ {"IFT_IFGSN", Const, 0, ""},
+ {"IFT_IMT", Const, 0, ""},
+ {"IFT_INFINIBAND", Const, 1, ""},
+ {"IFT_INTERLEAVE", Const, 0, ""},
+ {"IFT_IP", Const, 0, ""},
+ {"IFT_IPFORWARD", Const, 0, ""},
+ {"IFT_IPOVERATM", Const, 0, ""},
+ {"IFT_IPOVERCDLC", Const, 0, ""},
+ {"IFT_IPOVERCLAW", Const, 0, ""},
+ {"IFT_IPSWITCH", Const, 0, ""},
+ {"IFT_IPXIP", Const, 0, ""},
+ {"IFT_ISDN", Const, 0, ""},
+ {"IFT_ISDNBASIC", Const, 0, ""},
+ {"IFT_ISDNPRIMARY", Const, 0, ""},
+ {"IFT_ISDNS", Const, 0, ""},
+ {"IFT_ISDNU", Const, 0, ""},
+ {"IFT_ISO88022LLC", Const, 0, ""},
+ {"IFT_ISO88023", Const, 0, ""},
+ {"IFT_ISO88024", Const, 0, ""},
+ {"IFT_ISO88025", Const, 0, ""},
+ {"IFT_ISO88025CRFPINT", Const, 0, ""},
+ {"IFT_ISO88025DTR", Const, 0, ""},
+ {"IFT_ISO88025FIBER", Const, 0, ""},
+ {"IFT_ISO88026", Const, 0, ""},
+ {"IFT_ISUP", Const, 0, ""},
+ {"IFT_L2VLAN", Const, 0, ""},
+ {"IFT_L3IPVLAN", Const, 0, ""},
+ {"IFT_L3IPXVLAN", Const, 0, ""},
+ {"IFT_LAPB", Const, 0, ""},
+ {"IFT_LAPD", Const, 0, ""},
+ {"IFT_LAPF", Const, 0, ""},
+ {"IFT_LINEGROUP", Const, 1, ""},
+ {"IFT_LOCALTALK", Const, 0, ""},
+ {"IFT_LOOP", Const, 0, ""},
+ {"IFT_MEDIAMAILOVERIP", Const, 0, ""},
+ {"IFT_MFSIGLINK", Const, 0, ""},
+ {"IFT_MIOX25", Const, 0, ""},
+ {"IFT_MODEM", Const, 0, ""},
+ {"IFT_MPC", Const, 0, ""},
+ {"IFT_MPLS", Const, 0, ""},
+ {"IFT_MPLSTUNNEL", Const, 0, ""},
+ {"IFT_MSDSL", Const, 0, ""},
+ {"IFT_MVL", Const, 0, ""},
+ {"IFT_MYRINET", Const, 0, ""},
+ {"IFT_NFAS", Const, 0, ""},
+ {"IFT_NSIP", Const, 0, ""},
+ {"IFT_OPTICALCHANNEL", Const, 0, ""},
+ {"IFT_OPTICALTRANSPORT", Const, 0, ""},
+ {"IFT_OTHER", Const, 0, ""},
+ {"IFT_P10", Const, 0, ""},
+ {"IFT_P80", Const, 0, ""},
+ {"IFT_PARA", Const, 0, ""},
+ {"IFT_PDP", Const, 0, ""},
+ {"IFT_PFLOG", Const, 0, ""},
+ {"IFT_PFLOW", Const, 1, ""},
+ {"IFT_PFSYNC", Const, 0, ""},
+ {"IFT_PLC", Const, 0, ""},
+ {"IFT_PON155", Const, 1, ""},
+ {"IFT_PON622", Const, 1, ""},
+ {"IFT_POS", Const, 0, ""},
+ {"IFT_PPP", Const, 0, ""},
+ {"IFT_PPPMULTILINKBUNDLE", Const, 0, ""},
+ {"IFT_PROPATM", Const, 1, ""},
+ {"IFT_PROPBWAP2MP", Const, 0, ""},
+ {"IFT_PROPCNLS", Const, 0, ""},
+ {"IFT_PROPDOCSWIRELESSDOWNSTREAM", Const, 0, ""},
+ {"IFT_PROPDOCSWIRELESSMACLAYER", Const, 0, ""},
+ {"IFT_PROPDOCSWIRELESSUPSTREAM", Const, 0, ""},
+ {"IFT_PROPMUX", Const, 0, ""},
+ {"IFT_PROPVIRTUAL", Const, 0, ""},
+ {"IFT_PROPWIRELESSP2P", Const, 0, ""},
+ {"IFT_PTPSERIAL", Const, 0, ""},
+ {"IFT_PVC", Const, 0, ""},
+ {"IFT_Q2931", Const, 1, ""},
+ {"IFT_QLLC", Const, 0, ""},
+ {"IFT_RADIOMAC", Const, 0, ""},
+ {"IFT_RADSL", Const, 0, ""},
+ {"IFT_REACHDSL", Const, 0, ""},
+ {"IFT_RFC1483", Const, 0, ""},
+ {"IFT_RS232", Const, 0, ""},
+ {"IFT_RSRB", Const, 0, ""},
+ {"IFT_SDLC", Const, 0, ""},
+ {"IFT_SDSL", Const, 0, ""},
+ {"IFT_SHDSL", Const, 0, ""},
+ {"IFT_SIP", Const, 0, ""},
+ {"IFT_SIPSIG", Const, 1, ""},
+ {"IFT_SIPTG", Const, 1, ""},
+ {"IFT_SLIP", Const, 0, ""},
+ {"IFT_SMDSDXI", Const, 0, ""},
+ {"IFT_SMDSICIP", Const, 0, ""},
+ {"IFT_SONET", Const, 0, ""},
+ {"IFT_SONETOVERHEADCHANNEL", Const, 0, ""},
+ {"IFT_SONETPATH", Const, 0, ""},
+ {"IFT_SONETVT", Const, 0, ""},
+ {"IFT_SRP", Const, 0, ""},
+ {"IFT_SS7SIGLINK", Const, 0, ""},
+ {"IFT_STACKTOSTACK", Const, 0, ""},
+ {"IFT_STARLAN", Const, 0, ""},
+ {"IFT_STF", Const, 0, ""},
+ {"IFT_T1", Const, 0, ""},
+ {"IFT_TDLC", Const, 0, ""},
+ {"IFT_TELINK", Const, 1, ""},
+ {"IFT_TERMPAD", Const, 0, ""},
+ {"IFT_TR008", Const, 0, ""},
+ {"IFT_TRANSPHDLC", Const, 0, ""},
+ {"IFT_TUNNEL", Const, 0, ""},
+ {"IFT_ULTRA", Const, 0, ""},
+ {"IFT_USB", Const, 0, ""},
+ {"IFT_V11", Const, 0, ""},
+ {"IFT_V35", Const, 0, ""},
+ {"IFT_V36", Const, 0, ""},
+ {"IFT_V37", Const, 0, ""},
+ {"IFT_VDSL", Const, 0, ""},
+ {"IFT_VIRTUALIPADDRESS", Const, 0, ""},
+ {"IFT_VIRTUALTG", Const, 1, ""},
+ {"IFT_VOICEDID", Const, 1, ""},
+ {"IFT_VOICEEM", Const, 0, ""},
+ {"IFT_VOICEEMFGD", Const, 1, ""},
+ {"IFT_VOICEENCAP", Const, 0, ""},
+ {"IFT_VOICEFGDEANA", Const, 1, ""},
+ {"IFT_VOICEFXO", Const, 0, ""},
+ {"IFT_VOICEFXS", Const, 0, ""},
+ {"IFT_VOICEOVERATM", Const, 0, ""},
+ {"IFT_VOICEOVERCABLE", Const, 1, ""},
+ {"IFT_VOICEOVERFRAMERELAY", Const, 0, ""},
+ {"IFT_VOICEOVERIP", Const, 0, ""},
+ {"IFT_X213", Const, 0, ""},
+ {"IFT_X25", Const, 0, ""},
+ {"IFT_X25DDN", Const, 0, ""},
+ {"IFT_X25HUNTGROUP", Const, 0, ""},
+ {"IFT_X25MLP", Const, 0, ""},
+ {"IFT_X25PLE", Const, 0, ""},
+ {"IFT_XETHER", Const, 0, ""},
+ {"IGNBRK", Const, 0, ""},
+ {"IGNCR", Const, 0, ""},
+ {"IGNORE", Const, 0, ""},
+ {"IGNPAR", Const, 0, ""},
+ {"IMAXBEL", Const, 0, ""},
+ {"INFINITE", Const, 0, ""},
+ {"INLCR", Const, 0, ""},
+ {"INPCK", Const, 0, ""},
+ {"INVALID_FILE_ATTRIBUTES", Const, 0, ""},
+ {"IN_ACCESS", Const, 0, ""},
+ {"IN_ALL_EVENTS", Const, 0, ""},
+ {"IN_ATTRIB", Const, 0, ""},
+ {"IN_CLASSA_HOST", Const, 0, ""},
+ {"IN_CLASSA_MAX", Const, 0, ""},
+ {"IN_CLASSA_NET", Const, 0, ""},
+ {"IN_CLASSA_NSHIFT", Const, 0, ""},
+ {"IN_CLASSB_HOST", Const, 0, ""},
+ {"IN_CLASSB_MAX", Const, 0, ""},
+ {"IN_CLASSB_NET", Const, 0, ""},
+ {"IN_CLASSB_NSHIFT", Const, 0, ""},
+ {"IN_CLASSC_HOST", Const, 0, ""},
+ {"IN_CLASSC_NET", Const, 0, ""},
+ {"IN_CLASSC_NSHIFT", Const, 0, ""},
+ {"IN_CLASSD_HOST", Const, 0, ""},
+ {"IN_CLASSD_NET", Const, 0, ""},
+ {"IN_CLASSD_NSHIFT", Const, 0, ""},
+ {"IN_CLOEXEC", Const, 0, ""},
+ {"IN_CLOSE", Const, 0, ""},
+ {"IN_CLOSE_NOWRITE", Const, 0, ""},
+ {"IN_CLOSE_WRITE", Const, 0, ""},
+ {"IN_CREATE", Const, 0, ""},
+ {"IN_DELETE", Const, 0, ""},
+ {"IN_DELETE_SELF", Const, 0, ""},
+ {"IN_DONT_FOLLOW", Const, 0, ""},
+ {"IN_EXCL_UNLINK", Const, 0, ""},
+ {"IN_IGNORED", Const, 0, ""},
+ {"IN_ISDIR", Const, 0, ""},
+ {"IN_LINKLOCALNETNUM", Const, 0, ""},
+ {"IN_LOOPBACKNET", Const, 0, ""},
+ {"IN_MASK_ADD", Const, 0, ""},
+ {"IN_MODIFY", Const, 0, ""},
+ {"IN_MOVE", Const, 0, ""},
+ {"IN_MOVED_FROM", Const, 0, ""},
+ {"IN_MOVED_TO", Const, 0, ""},
+ {"IN_MOVE_SELF", Const, 0, ""},
+ {"IN_NONBLOCK", Const, 0, ""},
+ {"IN_ONESHOT", Const, 0, ""},
+ {"IN_ONLYDIR", Const, 0, ""},
+ {"IN_OPEN", Const, 0, ""},
+ {"IN_Q_OVERFLOW", Const, 0, ""},
+ {"IN_RFC3021_HOST", Const, 1, ""},
+ {"IN_RFC3021_MASK", Const, 1, ""},
+ {"IN_RFC3021_NET", Const, 1, ""},
+ {"IN_RFC3021_NSHIFT", Const, 1, ""},
+ {"IN_UNMOUNT", Const, 0, ""},
+ {"IOC_IN", Const, 1, ""},
+ {"IOC_INOUT", Const, 1, ""},
+ {"IOC_OUT", Const, 1, ""},
+ {"IOC_VENDOR", Const, 3, ""},
+ {"IOC_WS2", Const, 1, ""},
+ {"IO_REPARSE_TAG_SYMLINK", Const, 4, ""},
+ {"IPMreq", Type, 0, ""},
+ {"IPMreq.Interface", Field, 0, ""},
+ {"IPMreq.Multiaddr", Field, 0, ""},
+ {"IPMreqn", Type, 0, ""},
+ {"IPMreqn.Address", Field, 0, ""},
+ {"IPMreqn.Ifindex", Field, 0, ""},
+ {"IPMreqn.Multiaddr", Field, 0, ""},
+ {"IPPROTO_3PC", Const, 0, ""},
+ {"IPPROTO_ADFS", Const, 0, ""},
+ {"IPPROTO_AH", Const, 0, ""},
+ {"IPPROTO_AHIP", Const, 0, ""},
+ {"IPPROTO_APES", Const, 0, ""},
+ {"IPPROTO_ARGUS", Const, 0, ""},
+ {"IPPROTO_AX25", Const, 0, ""},
+ {"IPPROTO_BHA", Const, 0, ""},
+ {"IPPROTO_BLT", Const, 0, ""},
+ {"IPPROTO_BRSATMON", Const, 0, ""},
+ {"IPPROTO_CARP", Const, 0, ""},
+ {"IPPROTO_CFTP", Const, 0, ""},
+ {"IPPROTO_CHAOS", Const, 0, ""},
+ {"IPPROTO_CMTP", Const, 0, ""},
+ {"IPPROTO_COMP", Const, 0, ""},
+ {"IPPROTO_CPHB", Const, 0, ""},
+ {"IPPROTO_CPNX", Const, 0, ""},
+ {"IPPROTO_DCCP", Const, 0, ""},
+ {"IPPROTO_DDP", Const, 0, ""},
+ {"IPPROTO_DGP", Const, 0, ""},
+ {"IPPROTO_DIVERT", Const, 0, ""},
+ {"IPPROTO_DIVERT_INIT", Const, 3, ""},
+ {"IPPROTO_DIVERT_RESP", Const, 3, ""},
+ {"IPPROTO_DONE", Const, 0, ""},
+ {"IPPROTO_DSTOPTS", Const, 0, ""},
+ {"IPPROTO_EGP", Const, 0, ""},
+ {"IPPROTO_EMCON", Const, 0, ""},
+ {"IPPROTO_ENCAP", Const, 0, ""},
+ {"IPPROTO_EON", Const, 0, ""},
+ {"IPPROTO_ESP", Const, 0, ""},
+ {"IPPROTO_ETHERIP", Const, 0, ""},
+ {"IPPROTO_FRAGMENT", Const, 0, ""},
+ {"IPPROTO_GGP", Const, 0, ""},
+ {"IPPROTO_GMTP", Const, 0, ""},
+ {"IPPROTO_GRE", Const, 0, ""},
+ {"IPPROTO_HELLO", Const, 0, ""},
+ {"IPPROTO_HMP", Const, 0, ""},
+ {"IPPROTO_HOPOPTS", Const, 0, ""},
+ {"IPPROTO_ICMP", Const, 0, ""},
+ {"IPPROTO_ICMPV6", Const, 0, ""},
+ {"IPPROTO_IDP", Const, 0, ""},
+ {"IPPROTO_IDPR", Const, 0, ""},
+ {"IPPROTO_IDRP", Const, 0, ""},
+ {"IPPROTO_IGMP", Const, 0, ""},
+ {"IPPROTO_IGP", Const, 0, ""},
+ {"IPPROTO_IGRP", Const, 0, ""},
+ {"IPPROTO_IL", Const, 0, ""},
+ {"IPPROTO_INLSP", Const, 0, ""},
+ {"IPPROTO_INP", Const, 0, ""},
+ {"IPPROTO_IP", Const, 0, ""},
+ {"IPPROTO_IPCOMP", Const, 0, ""},
+ {"IPPROTO_IPCV", Const, 0, ""},
+ {"IPPROTO_IPEIP", Const, 0, ""},
+ {"IPPROTO_IPIP", Const, 0, ""},
+ {"IPPROTO_IPPC", Const, 0, ""},
+ {"IPPROTO_IPV4", Const, 0, ""},
+ {"IPPROTO_IPV6", Const, 0, ""},
+ {"IPPROTO_IPV6_ICMP", Const, 1, ""},
+ {"IPPROTO_IRTP", Const, 0, ""},
+ {"IPPROTO_KRYPTOLAN", Const, 0, ""},
+ {"IPPROTO_LARP", Const, 0, ""},
+ {"IPPROTO_LEAF1", Const, 0, ""},
+ {"IPPROTO_LEAF2", Const, 0, ""},
+ {"IPPROTO_MAX", Const, 0, ""},
+ {"IPPROTO_MAXID", Const, 0, ""},
+ {"IPPROTO_MEAS", Const, 0, ""},
+ {"IPPROTO_MH", Const, 1, ""},
+ {"IPPROTO_MHRP", Const, 0, ""},
+ {"IPPROTO_MICP", Const, 0, ""},
+ {"IPPROTO_MOBILE", Const, 0, ""},
+ {"IPPROTO_MPLS", Const, 1, ""},
+ {"IPPROTO_MTP", Const, 0, ""},
+ {"IPPROTO_MUX", Const, 0, ""},
+ {"IPPROTO_ND", Const, 0, ""},
+ {"IPPROTO_NHRP", Const, 0, ""},
+ {"IPPROTO_NONE", Const, 0, ""},
+ {"IPPROTO_NSP", Const, 0, ""},
+ {"IPPROTO_NVPII", Const, 0, ""},
+ {"IPPROTO_OLD_DIVERT", Const, 0, ""},
+ {"IPPROTO_OSPFIGP", Const, 0, ""},
+ {"IPPROTO_PFSYNC", Const, 0, ""},
+ {"IPPROTO_PGM", Const, 0, ""},
+ {"IPPROTO_PIGP", Const, 0, ""},
+ {"IPPROTO_PIM", Const, 0, ""},
+ {"IPPROTO_PRM", Const, 0, ""},
+ {"IPPROTO_PUP", Const, 0, ""},
+ {"IPPROTO_PVP", Const, 0, ""},
+ {"IPPROTO_RAW", Const, 0, ""},
+ {"IPPROTO_RCCMON", Const, 0, ""},
+ {"IPPROTO_RDP", Const, 0, ""},
+ {"IPPROTO_ROUTING", Const, 0, ""},
+ {"IPPROTO_RSVP", Const, 0, ""},
+ {"IPPROTO_RVD", Const, 0, ""},
+ {"IPPROTO_SATEXPAK", Const, 0, ""},
+ {"IPPROTO_SATMON", Const, 0, ""},
+ {"IPPROTO_SCCSP", Const, 0, ""},
+ {"IPPROTO_SCTP", Const, 0, ""},
+ {"IPPROTO_SDRP", Const, 0, ""},
+ {"IPPROTO_SEND", Const, 1, ""},
+ {"IPPROTO_SEP", Const, 0, ""},
+ {"IPPROTO_SKIP", Const, 0, ""},
+ {"IPPROTO_SPACER", Const, 0, ""},
+ {"IPPROTO_SRPC", Const, 0, ""},
+ {"IPPROTO_ST", Const, 0, ""},
+ {"IPPROTO_SVMTP", Const, 0, ""},
+ {"IPPROTO_SWIPE", Const, 0, ""},
+ {"IPPROTO_TCF", Const, 0, ""},
+ {"IPPROTO_TCP", Const, 0, ""},
+ {"IPPROTO_TLSP", Const, 0, ""},
+ {"IPPROTO_TP", Const, 0, ""},
+ {"IPPROTO_TPXX", Const, 0, ""},
+ {"IPPROTO_TRUNK1", Const, 0, ""},
+ {"IPPROTO_TRUNK2", Const, 0, ""},
+ {"IPPROTO_TTP", Const, 0, ""},
+ {"IPPROTO_UDP", Const, 0, ""},
+ {"IPPROTO_UDPLITE", Const, 0, ""},
+ {"IPPROTO_VINES", Const, 0, ""},
+ {"IPPROTO_VISA", Const, 0, ""},
+ {"IPPROTO_VMTP", Const, 0, ""},
+ {"IPPROTO_VRRP", Const, 1, ""},
+ {"IPPROTO_WBEXPAK", Const, 0, ""},
+ {"IPPROTO_WBMON", Const, 0, ""},
+ {"IPPROTO_WSN", Const, 0, ""},
+ {"IPPROTO_XNET", Const, 0, ""},
+ {"IPPROTO_XTP", Const, 0, ""},
+ {"IPV6_2292DSTOPTS", Const, 0, ""},
+ {"IPV6_2292HOPLIMIT", Const, 0, ""},
+ {"IPV6_2292HOPOPTS", Const, 0, ""},
+ {"IPV6_2292NEXTHOP", Const, 0, ""},
+ {"IPV6_2292PKTINFO", Const, 0, ""},
+ {"IPV6_2292PKTOPTIONS", Const, 0, ""},
+ {"IPV6_2292RTHDR", Const, 0, ""},
+ {"IPV6_ADDRFORM", Const, 0, ""},
+ {"IPV6_ADD_MEMBERSHIP", Const, 0, ""},
+ {"IPV6_AUTHHDR", Const, 0, ""},
+ {"IPV6_AUTH_LEVEL", Const, 1, ""},
+ {"IPV6_AUTOFLOWLABEL", Const, 0, ""},
+ {"IPV6_BINDANY", Const, 0, ""},
+ {"IPV6_BINDV6ONLY", Const, 0, ""},
+ {"IPV6_BOUND_IF", Const, 0, ""},
+ {"IPV6_CHECKSUM", Const, 0, ""},
+ {"IPV6_DEFAULT_MULTICAST_HOPS", Const, 0, ""},
+ {"IPV6_DEFAULT_MULTICAST_LOOP", Const, 0, ""},
+ {"IPV6_DEFHLIM", Const, 0, ""},
+ {"IPV6_DONTFRAG", Const, 0, ""},
+ {"IPV6_DROP_MEMBERSHIP", Const, 0, ""},
+ {"IPV6_DSTOPTS", Const, 0, ""},
+ {"IPV6_ESP_NETWORK_LEVEL", Const, 1, ""},
+ {"IPV6_ESP_TRANS_LEVEL", Const, 1, ""},
+ {"IPV6_FAITH", Const, 0, ""},
+ {"IPV6_FLOWINFO_MASK", Const, 0, ""},
+ {"IPV6_FLOWLABEL_MASK", Const, 0, ""},
+ {"IPV6_FRAGTTL", Const, 0, ""},
+ {"IPV6_FW_ADD", Const, 0, ""},
+ {"IPV6_FW_DEL", Const, 0, ""},
+ {"IPV6_FW_FLUSH", Const, 0, ""},
+ {"IPV6_FW_GET", Const, 0, ""},
+ {"IPV6_FW_ZERO", Const, 0, ""},
+ {"IPV6_HLIMDEC", Const, 0, ""},
+ {"IPV6_HOPLIMIT", Const, 0, ""},
+ {"IPV6_HOPOPTS", Const, 0, ""},
+ {"IPV6_IPCOMP_LEVEL", Const, 1, ""},
+ {"IPV6_IPSEC_POLICY", Const, 0, ""},
+ {"IPV6_JOIN_ANYCAST", Const, 0, ""},
+ {"IPV6_JOIN_GROUP", Const, 0, ""},
+ {"IPV6_LEAVE_ANYCAST", Const, 0, ""},
+ {"IPV6_LEAVE_GROUP", Const, 0, ""},
+ {"IPV6_MAXHLIM", Const, 0, ""},
+ {"IPV6_MAXOPTHDR", Const, 0, ""},
+ {"IPV6_MAXPACKET", Const, 0, ""},
+ {"IPV6_MAX_GROUP_SRC_FILTER", Const, 0, ""},
+ {"IPV6_MAX_MEMBERSHIPS", Const, 0, ""},
+ {"IPV6_MAX_SOCK_SRC_FILTER", Const, 0, ""},
+ {"IPV6_MIN_MEMBERSHIPS", Const, 0, ""},
+ {"IPV6_MMTU", Const, 0, ""},
+ {"IPV6_MSFILTER", Const, 0, ""},
+ {"IPV6_MTU", Const, 0, ""},
+ {"IPV6_MTU_DISCOVER", Const, 0, ""},
+ {"IPV6_MULTICAST_HOPS", Const, 0, ""},
+ {"IPV6_MULTICAST_IF", Const, 0, ""},
+ {"IPV6_MULTICAST_LOOP", Const, 0, ""},
+ {"IPV6_NEXTHOP", Const, 0, ""},
+ {"IPV6_OPTIONS", Const, 1, ""},
+ {"IPV6_PATHMTU", Const, 0, ""},
+ {"IPV6_PIPEX", Const, 1, ""},
+ {"IPV6_PKTINFO", Const, 0, ""},
+ {"IPV6_PMTUDISC_DO", Const, 0, ""},
+ {"IPV6_PMTUDISC_DONT", Const, 0, ""},
+ {"IPV6_PMTUDISC_PROBE", Const, 0, ""},
+ {"IPV6_PMTUDISC_WANT", Const, 0, ""},
+ {"IPV6_PORTRANGE", Const, 0, ""},
+ {"IPV6_PORTRANGE_DEFAULT", Const, 0, ""},
+ {"IPV6_PORTRANGE_HIGH", Const, 0, ""},
+ {"IPV6_PORTRANGE_LOW", Const, 0, ""},
+ {"IPV6_PREFER_TEMPADDR", Const, 0, ""},
+ {"IPV6_RECVDSTOPTS", Const, 0, ""},
+ {"IPV6_RECVDSTPORT", Const, 3, ""},
+ {"IPV6_RECVERR", Const, 0, ""},
+ {"IPV6_RECVHOPLIMIT", Const, 0, ""},
+ {"IPV6_RECVHOPOPTS", Const, 0, ""},
+ {"IPV6_RECVPATHMTU", Const, 0, ""},
+ {"IPV6_RECVPKTINFO", Const, 0, ""},
+ {"IPV6_RECVRTHDR", Const, 0, ""},
+ {"IPV6_RECVTCLASS", Const, 0, ""},
+ {"IPV6_ROUTER_ALERT", Const, 0, ""},
+ {"IPV6_RTABLE", Const, 1, ""},
+ {"IPV6_RTHDR", Const, 0, ""},
+ {"IPV6_RTHDRDSTOPTS", Const, 0, ""},
+ {"IPV6_RTHDR_LOOSE", Const, 0, ""},
+ {"IPV6_RTHDR_STRICT", Const, 0, ""},
+ {"IPV6_RTHDR_TYPE_0", Const, 0, ""},
+ {"IPV6_RXDSTOPTS", Const, 0, ""},
+ {"IPV6_RXHOPOPTS", Const, 0, ""},
+ {"IPV6_SOCKOPT_RESERVED1", Const, 0, ""},
+ {"IPV6_TCLASS", Const, 0, ""},
+ {"IPV6_UNICAST_HOPS", Const, 0, ""},
+ {"IPV6_USE_MIN_MTU", Const, 0, ""},
+ {"IPV6_V6ONLY", Const, 0, ""},
+ {"IPV6_VERSION", Const, 0, ""},
+ {"IPV6_VERSION_MASK", Const, 0, ""},
+ {"IPV6_XFRM_POLICY", Const, 0, ""},
+ {"IP_ADD_MEMBERSHIP", Const, 0, ""},
+ {"IP_ADD_SOURCE_MEMBERSHIP", Const, 0, ""},
+ {"IP_AUTH_LEVEL", Const, 1, ""},
+ {"IP_BINDANY", Const, 0, ""},
+ {"IP_BLOCK_SOURCE", Const, 0, ""},
+ {"IP_BOUND_IF", Const, 0, ""},
+ {"IP_DEFAULT_MULTICAST_LOOP", Const, 0, ""},
+ {"IP_DEFAULT_MULTICAST_TTL", Const, 0, ""},
+ {"IP_DF", Const, 0, ""},
+ {"IP_DIVERTFL", Const, 3, ""},
+ {"IP_DONTFRAG", Const, 0, ""},
+ {"IP_DROP_MEMBERSHIP", Const, 0, ""},
+ {"IP_DROP_SOURCE_MEMBERSHIP", Const, 0, ""},
+ {"IP_DUMMYNET3", Const, 0, ""},
+ {"IP_DUMMYNET_CONFIGURE", Const, 0, ""},
+ {"IP_DUMMYNET_DEL", Const, 0, ""},
+ {"IP_DUMMYNET_FLUSH", Const, 0, ""},
+ {"IP_DUMMYNET_GET", Const, 0, ""},
+ {"IP_EF", Const, 1, ""},
+ {"IP_ERRORMTU", Const, 1, ""},
+ {"IP_ESP_NETWORK_LEVEL", Const, 1, ""},
+ {"IP_ESP_TRANS_LEVEL", Const, 1, ""},
+ {"IP_FAITH", Const, 0, ""},
+ {"IP_FREEBIND", Const, 0, ""},
+ {"IP_FW3", Const, 0, ""},
+ {"IP_FW_ADD", Const, 0, ""},
+ {"IP_FW_DEL", Const, 0, ""},
+ {"IP_FW_FLUSH", Const, 0, ""},
+ {"IP_FW_GET", Const, 0, ""},
+ {"IP_FW_NAT_CFG", Const, 0, ""},
+ {"IP_FW_NAT_DEL", Const, 0, ""},
+ {"IP_FW_NAT_GET_CONFIG", Const, 0, ""},
+ {"IP_FW_NAT_GET_LOG", Const, 0, ""},
+ {"IP_FW_RESETLOG", Const, 0, ""},
+ {"IP_FW_TABLE_ADD", Const, 0, ""},
+ {"IP_FW_TABLE_DEL", Const, 0, ""},
+ {"IP_FW_TABLE_FLUSH", Const, 0, ""},
+ {"IP_FW_TABLE_GETSIZE", Const, 0, ""},
+ {"IP_FW_TABLE_LIST", Const, 0, ""},
+ {"IP_FW_ZERO", Const, 0, ""},
+ {"IP_HDRINCL", Const, 0, ""},
+ {"IP_IPCOMP_LEVEL", Const, 1, ""},
+ {"IP_IPSECFLOWINFO", Const, 1, ""},
+ {"IP_IPSEC_LOCAL_AUTH", Const, 1, ""},
+ {"IP_IPSEC_LOCAL_CRED", Const, 1, ""},
+ {"IP_IPSEC_LOCAL_ID", Const, 1, ""},
+ {"IP_IPSEC_POLICY", Const, 0, ""},
+ {"IP_IPSEC_REMOTE_AUTH", Const, 1, ""},
+ {"IP_IPSEC_REMOTE_CRED", Const, 1, ""},
+ {"IP_IPSEC_REMOTE_ID", Const, 1, ""},
+ {"IP_MAXPACKET", Const, 0, ""},
+ {"IP_MAX_GROUP_SRC_FILTER", Const, 0, ""},
+ {"IP_MAX_MEMBERSHIPS", Const, 0, ""},
+ {"IP_MAX_SOCK_MUTE_FILTER", Const, 0, ""},
+ {"IP_MAX_SOCK_SRC_FILTER", Const, 0, ""},
+ {"IP_MAX_SOURCE_FILTER", Const, 0, ""},
+ {"IP_MF", Const, 0, ""},
+ {"IP_MINFRAGSIZE", Const, 1, ""},
+ {"IP_MINTTL", Const, 0, ""},
+ {"IP_MIN_MEMBERSHIPS", Const, 0, ""},
+ {"IP_MSFILTER", Const, 0, ""},
+ {"IP_MSS", Const, 0, ""},
+ {"IP_MTU", Const, 0, ""},
+ {"IP_MTU_DISCOVER", Const, 0, ""},
+ {"IP_MULTICAST_IF", Const, 0, ""},
+ {"IP_MULTICAST_IFINDEX", Const, 0, ""},
+ {"IP_MULTICAST_LOOP", Const, 0, ""},
+ {"IP_MULTICAST_TTL", Const, 0, ""},
+ {"IP_MULTICAST_VIF", Const, 0, ""},
+ {"IP_NAT__XXX", Const, 0, ""},
+ {"IP_OFFMASK", Const, 0, ""},
+ {"IP_OLD_FW_ADD", Const, 0, ""},
+ {"IP_OLD_FW_DEL", Const, 0, ""},
+ {"IP_OLD_FW_FLUSH", Const, 0, ""},
+ {"IP_OLD_FW_GET", Const, 0, ""},
+ {"IP_OLD_FW_RESETLOG", Const, 0, ""},
+ {"IP_OLD_FW_ZERO", Const, 0, ""},
+ {"IP_ONESBCAST", Const, 0, ""},
+ {"IP_OPTIONS", Const, 0, ""},
+ {"IP_ORIGDSTADDR", Const, 0, ""},
+ {"IP_PASSSEC", Const, 0, ""},
+ {"IP_PIPEX", Const, 1, ""},
+ {"IP_PKTINFO", Const, 0, ""},
+ {"IP_PKTOPTIONS", Const, 0, ""},
+ {"IP_PMTUDISC", Const, 0, ""},
+ {"IP_PMTUDISC_DO", Const, 0, ""},
+ {"IP_PMTUDISC_DONT", Const, 0, ""},
+ {"IP_PMTUDISC_PROBE", Const, 0, ""},
+ {"IP_PMTUDISC_WANT", Const, 0, ""},
+ {"IP_PORTRANGE", Const, 0, ""},
+ {"IP_PORTRANGE_DEFAULT", Const, 0, ""},
+ {"IP_PORTRANGE_HIGH", Const, 0, ""},
+ {"IP_PORTRANGE_LOW", Const, 0, ""},
+ {"IP_RECVDSTADDR", Const, 0, ""},
+ {"IP_RECVDSTPORT", Const, 1, ""},
+ {"IP_RECVERR", Const, 0, ""},
+ {"IP_RECVIF", Const, 0, ""},
+ {"IP_RECVOPTS", Const, 0, ""},
+ {"IP_RECVORIGDSTADDR", Const, 0, ""},
+ {"IP_RECVPKTINFO", Const, 0, ""},
+ {"IP_RECVRETOPTS", Const, 0, ""},
+ {"IP_RECVRTABLE", Const, 1, ""},
+ {"IP_RECVTOS", Const, 0, ""},
+ {"IP_RECVTTL", Const, 0, ""},
+ {"IP_RETOPTS", Const, 0, ""},
+ {"IP_RF", Const, 0, ""},
+ {"IP_ROUTER_ALERT", Const, 0, ""},
+ {"IP_RSVP_OFF", Const, 0, ""},
+ {"IP_RSVP_ON", Const, 0, ""},
+ {"IP_RSVP_VIF_OFF", Const, 0, ""},
+ {"IP_RSVP_VIF_ON", Const, 0, ""},
+ {"IP_RTABLE", Const, 1, ""},
+ {"IP_SENDSRCADDR", Const, 0, ""},
+ {"IP_STRIPHDR", Const, 0, ""},
+ {"IP_TOS", Const, 0, ""},
+ {"IP_TRAFFIC_MGT_BACKGROUND", Const, 0, ""},
+ {"IP_TRANSPARENT", Const, 0, ""},
+ {"IP_TTL", Const, 0, ""},
+ {"IP_UNBLOCK_SOURCE", Const, 0, ""},
+ {"IP_XFRM_POLICY", Const, 0, ""},
+ {"IPv6MTUInfo", Type, 2, ""},
+ {"IPv6MTUInfo.Addr", Field, 2, ""},
+ {"IPv6MTUInfo.Mtu", Field, 2, ""},
+ {"IPv6Mreq", Type, 0, ""},
+ {"IPv6Mreq.Interface", Field, 0, ""},
+ {"IPv6Mreq.Multiaddr", Field, 0, ""},
+ {"ISIG", Const, 0, ""},
+ {"ISTRIP", Const, 0, ""},
+ {"IUCLC", Const, 0, ""},
+ {"IUTF8", Const, 0, ""},
+ {"IXANY", Const, 0, ""},
+ {"IXOFF", Const, 0, ""},
+ {"IXON", Const, 0, ""},
+ {"IfAddrmsg", Type, 0, ""},
+ {"IfAddrmsg.Family", Field, 0, ""},
+ {"IfAddrmsg.Flags", Field, 0, ""},
+ {"IfAddrmsg.Index", Field, 0, ""},
+ {"IfAddrmsg.Prefixlen", Field, 0, ""},
+ {"IfAddrmsg.Scope", Field, 0, ""},
+ {"IfAnnounceMsghdr", Type, 1, ""},
+ {"IfAnnounceMsghdr.Hdrlen", Field, 2, ""},
+ {"IfAnnounceMsghdr.Index", Field, 1, ""},
+ {"IfAnnounceMsghdr.Msglen", Field, 1, ""},
+ {"IfAnnounceMsghdr.Name", Field, 1, ""},
+ {"IfAnnounceMsghdr.Type", Field, 1, ""},
+ {"IfAnnounceMsghdr.Version", Field, 1, ""},
+ {"IfAnnounceMsghdr.What", Field, 1, ""},
+ {"IfData", Type, 0, ""},
+ {"IfData.Addrlen", Field, 0, ""},
+ {"IfData.Baudrate", Field, 0, ""},
+ {"IfData.Capabilities", Field, 2, ""},
+ {"IfData.Collisions", Field, 0, ""},
+ {"IfData.Datalen", Field, 0, ""},
+ {"IfData.Epoch", Field, 0, ""},
+ {"IfData.Hdrlen", Field, 0, ""},
+ {"IfData.Hwassist", Field, 0, ""},
+ {"IfData.Ibytes", Field, 0, ""},
+ {"IfData.Ierrors", Field, 0, ""},
+ {"IfData.Imcasts", Field, 0, ""},
+ {"IfData.Ipackets", Field, 0, ""},
+ {"IfData.Iqdrops", Field, 0, ""},
+ {"IfData.Lastchange", Field, 0, ""},
+ {"IfData.Link_state", Field, 0, ""},
+ {"IfData.Mclpool", Field, 2, ""},
+ {"IfData.Metric", Field, 0, ""},
+ {"IfData.Mtu", Field, 0, ""},
+ {"IfData.Noproto", Field, 0, ""},
+ {"IfData.Obytes", Field, 0, ""},
+ {"IfData.Oerrors", Field, 0, ""},
+ {"IfData.Omcasts", Field, 0, ""},
+ {"IfData.Opackets", Field, 0, ""},
+ {"IfData.Pad", Field, 2, ""},
+ {"IfData.Pad_cgo_0", Field, 2, ""},
+ {"IfData.Pad_cgo_1", Field, 2, ""},
+ {"IfData.Physical", Field, 0, ""},
+ {"IfData.Recvquota", Field, 0, ""},
+ {"IfData.Recvtiming", Field, 0, ""},
+ {"IfData.Reserved1", Field, 0, ""},
+ {"IfData.Reserved2", Field, 0, ""},
+ {"IfData.Spare_char1", Field, 0, ""},
+ {"IfData.Spare_char2", Field, 0, ""},
+ {"IfData.Type", Field, 0, ""},
+ {"IfData.Typelen", Field, 0, ""},
+ {"IfData.Unused1", Field, 0, ""},
+ {"IfData.Unused2", Field, 0, ""},
+ {"IfData.Xmitquota", Field, 0, ""},
+ {"IfData.Xmittiming", Field, 0, ""},
+ {"IfInfomsg", Type, 0, ""},
+ {"IfInfomsg.Change", Field, 0, ""},
+ {"IfInfomsg.Family", Field, 0, ""},
+ {"IfInfomsg.Flags", Field, 0, ""},
+ {"IfInfomsg.Index", Field, 0, ""},
+ {"IfInfomsg.Type", Field, 0, ""},
+ {"IfInfomsg.X__ifi_pad", Field, 0, ""},
+ {"IfMsghdr", Type, 0, ""},
+ {"IfMsghdr.Addrs", Field, 0, ""},
+ {"IfMsghdr.Data", Field, 0, ""},
+ {"IfMsghdr.Flags", Field, 0, ""},
+ {"IfMsghdr.Hdrlen", Field, 2, ""},
+ {"IfMsghdr.Index", Field, 0, ""},
+ {"IfMsghdr.Msglen", Field, 0, ""},
+ {"IfMsghdr.Pad1", Field, 2, ""},
+ {"IfMsghdr.Pad2", Field, 2, ""},
+ {"IfMsghdr.Pad_cgo_0", Field, 0, ""},
+ {"IfMsghdr.Pad_cgo_1", Field, 2, ""},
+ {"IfMsghdr.Tableid", Field, 2, ""},
+ {"IfMsghdr.Type", Field, 0, ""},
+ {"IfMsghdr.Version", Field, 0, ""},
+ {"IfMsghdr.Xflags", Field, 2, ""},
+ {"IfaMsghdr", Type, 0, ""},
+ {"IfaMsghdr.Addrs", Field, 0, ""},
+ {"IfaMsghdr.Flags", Field, 0, ""},
+ {"IfaMsghdr.Hdrlen", Field, 2, ""},
+ {"IfaMsghdr.Index", Field, 0, ""},
+ {"IfaMsghdr.Metric", Field, 0, ""},
+ {"IfaMsghdr.Msglen", Field, 0, ""},
+ {"IfaMsghdr.Pad1", Field, 2, ""},
+ {"IfaMsghdr.Pad2", Field, 2, ""},
+ {"IfaMsghdr.Pad_cgo_0", Field, 0, ""},
+ {"IfaMsghdr.Tableid", Field, 2, ""},
+ {"IfaMsghdr.Type", Field, 0, ""},
+ {"IfaMsghdr.Version", Field, 0, ""},
+ {"IfmaMsghdr", Type, 0, ""},
+ {"IfmaMsghdr.Addrs", Field, 0, ""},
+ {"IfmaMsghdr.Flags", Field, 0, ""},
+ {"IfmaMsghdr.Index", Field, 0, ""},
+ {"IfmaMsghdr.Msglen", Field, 0, ""},
+ {"IfmaMsghdr.Pad_cgo_0", Field, 0, ""},
+ {"IfmaMsghdr.Type", Field, 0, ""},
+ {"IfmaMsghdr.Version", Field, 0, ""},
+ {"IfmaMsghdr2", Type, 0, ""},
+ {"IfmaMsghdr2.Addrs", Field, 0, ""},
+ {"IfmaMsghdr2.Flags", Field, 0, ""},
+ {"IfmaMsghdr2.Index", Field, 0, ""},
+ {"IfmaMsghdr2.Msglen", Field, 0, ""},
+ {"IfmaMsghdr2.Pad_cgo_0", Field, 0, ""},
+ {"IfmaMsghdr2.Refcount", Field, 0, ""},
+ {"IfmaMsghdr2.Type", Field, 0, ""},
+ {"IfmaMsghdr2.Version", Field, 0, ""},
+ {"ImplementsGetwd", Const, 0, ""},
+ {"Inet4Pktinfo", Type, 0, ""},
+ {"Inet4Pktinfo.Addr", Field, 0, ""},
+ {"Inet4Pktinfo.Ifindex", Field, 0, ""},
+ {"Inet4Pktinfo.Spec_dst", Field, 0, ""},
+ {"Inet6Pktinfo", Type, 0, ""},
+ {"Inet6Pktinfo.Addr", Field, 0, ""},
+ {"Inet6Pktinfo.Ifindex", Field, 0, ""},
+ {"InotifyAddWatch", Func, 0, "func(fd int, pathname string, mask uint32) (watchdesc int, err error)"},
+ {"InotifyEvent", Type, 0, ""},
+ {"InotifyEvent.Cookie", Field, 0, ""},
+ {"InotifyEvent.Len", Field, 0, ""},
+ {"InotifyEvent.Mask", Field, 0, ""},
+ {"InotifyEvent.Name", Field, 0, ""},
+ {"InotifyEvent.Wd", Field, 0, ""},
+ {"InotifyInit", Func, 0, "func() (fd int, err error)"},
+ {"InotifyInit1", Func, 0, "func(flags int) (fd int, err error)"},
+ {"InotifyRmWatch", Func, 0, "func(fd int, watchdesc uint32) (success int, err error)"},
+ {"InterfaceAddrMessage", Type, 0, ""},
+ {"InterfaceAddrMessage.Data", Field, 0, ""},
+ {"InterfaceAddrMessage.Header", Field, 0, ""},
+ {"InterfaceAnnounceMessage", Type, 1, ""},
+ {"InterfaceAnnounceMessage.Header", Field, 1, ""},
+ {"InterfaceInfo", Type, 0, ""},
+ {"InterfaceInfo.Address", Field, 0, ""},
+ {"InterfaceInfo.BroadcastAddress", Field, 0, ""},
+ {"InterfaceInfo.Flags", Field, 0, ""},
+ {"InterfaceInfo.Netmask", Field, 0, ""},
+ {"InterfaceMessage", Type, 0, ""},
+ {"InterfaceMessage.Data", Field, 0, ""},
+ {"InterfaceMessage.Header", Field, 0, ""},
+ {"InterfaceMulticastAddrMessage", Type, 0, ""},
+ {"InterfaceMulticastAddrMessage.Data", Field, 0, ""},
+ {"InterfaceMulticastAddrMessage.Header", Field, 0, ""},
+ {"InvalidHandle", Const, 0, ""},
+ {"Ioperm", Func, 0, "func(from int, num int, on int) (err error)"},
+ {"Iopl", Func, 0, "func(level int) (err error)"},
+ {"Iovec", Type, 0, ""},
+ {"Iovec.Base", Field, 0, ""},
+ {"Iovec.Len", Field, 0, ""},
+ {"IpAdapterInfo", Type, 0, ""},
+ {"IpAdapterInfo.AdapterName", Field, 0, ""},
+ {"IpAdapterInfo.Address", Field, 0, ""},
+ {"IpAdapterInfo.AddressLength", Field, 0, ""},
+ {"IpAdapterInfo.ComboIndex", Field, 0, ""},
+ {"IpAdapterInfo.CurrentIpAddress", Field, 0, ""},
+ {"IpAdapterInfo.Description", Field, 0, ""},
+ {"IpAdapterInfo.DhcpEnabled", Field, 0, ""},
+ {"IpAdapterInfo.DhcpServer", Field, 0, ""},
+ {"IpAdapterInfo.GatewayList", Field, 0, ""},
+ {"IpAdapterInfo.HaveWins", Field, 0, ""},
+ {"IpAdapterInfo.Index", Field, 0, ""},
+ {"IpAdapterInfo.IpAddressList", Field, 0, ""},
+ {"IpAdapterInfo.LeaseExpires", Field, 0, ""},
+ {"IpAdapterInfo.LeaseObtained", Field, 0, ""},
+ {"IpAdapterInfo.Next", Field, 0, ""},
+ {"IpAdapterInfo.PrimaryWinsServer", Field, 0, ""},
+ {"IpAdapterInfo.SecondaryWinsServer", Field, 0, ""},
+ {"IpAdapterInfo.Type", Field, 0, ""},
+ {"IpAddrString", Type, 0, ""},
+ {"IpAddrString.Context", Field, 0, ""},
+ {"IpAddrString.IpAddress", Field, 0, ""},
+ {"IpAddrString.IpMask", Field, 0, ""},
+ {"IpAddrString.Next", Field, 0, ""},
+ {"IpAddressString", Type, 0, ""},
+ {"IpAddressString.String", Field, 0, ""},
+ {"IpMaskString", Type, 0, ""},
+ {"IpMaskString.String", Field, 2, ""},
+ {"Issetugid", Func, 0, ""},
+ {"KEY_ALL_ACCESS", Const, 0, ""},
+ {"KEY_CREATE_LINK", Const, 0, ""},
+ {"KEY_CREATE_SUB_KEY", Const, 0, ""},
+ {"KEY_ENUMERATE_SUB_KEYS", Const, 0, ""},
+ {"KEY_EXECUTE", Const, 0, ""},
+ {"KEY_NOTIFY", Const, 0, ""},
+ {"KEY_QUERY_VALUE", Const, 0, ""},
+ {"KEY_READ", Const, 0, ""},
+ {"KEY_SET_VALUE", Const, 0, ""},
+ {"KEY_WOW64_32KEY", Const, 0, ""},
+ {"KEY_WOW64_64KEY", Const, 0, ""},
+ {"KEY_WRITE", Const, 0, ""},
+ {"Kevent", Func, 0, ""},
+ {"Kevent_t", Type, 0, ""},
+ {"Kevent_t.Data", Field, 0, ""},
+ {"Kevent_t.Fflags", Field, 0, ""},
+ {"Kevent_t.Filter", Field, 0, ""},
+ {"Kevent_t.Flags", Field, 0, ""},
+ {"Kevent_t.Ident", Field, 0, ""},
+ {"Kevent_t.Pad_cgo_0", Field, 2, ""},
+ {"Kevent_t.Udata", Field, 0, ""},
+ {"Kill", Func, 0, "func(pid int, sig Signal) (err error)"},
+ {"Klogctl", Func, 0, "func(typ int, buf []byte) (n int, err error)"},
+ {"Kqueue", Func, 0, ""},
+ {"LANG_ENGLISH", Const, 0, ""},
+ {"LAYERED_PROTOCOL", Const, 2, ""},
+ {"LCNT_OVERLOAD_FLUSH", Const, 1, ""},
+ {"LINUX_REBOOT_CMD_CAD_OFF", Const, 0, ""},
+ {"LINUX_REBOOT_CMD_CAD_ON", Const, 0, ""},
+ {"LINUX_REBOOT_CMD_HALT", Const, 0, ""},
+ {"LINUX_REBOOT_CMD_KEXEC", Const, 0, ""},
+ {"LINUX_REBOOT_CMD_POWER_OFF", Const, 0, ""},
+ {"LINUX_REBOOT_CMD_RESTART", Const, 0, ""},
+ {"LINUX_REBOOT_CMD_RESTART2", Const, 0, ""},
+ {"LINUX_REBOOT_CMD_SW_SUSPEND", Const, 0, ""},
+ {"LINUX_REBOOT_MAGIC1", Const, 0, ""},
+ {"LINUX_REBOOT_MAGIC2", Const, 0, ""},
+ {"LOCK_EX", Const, 0, ""},
+ {"LOCK_NB", Const, 0, ""},
+ {"LOCK_SH", Const, 0, ""},
+ {"LOCK_UN", Const, 0, ""},
+ {"LazyDLL", Type, 0, ""},
+ {"LazyDLL.Name", Field, 0, ""},
+ {"LazyProc", Type, 0, ""},
+ {"LazyProc.Name", Field, 0, ""},
+ {"Lchown", Func, 0, "func(path string, uid int, gid int) (err error)"},
+ {"Linger", Type, 0, ""},
+ {"Linger.Linger", Field, 0, ""},
+ {"Linger.Onoff", Field, 0, ""},
+ {"Link", Func, 0, "func(oldpath string, newpath string) (err error)"},
+ {"Listen", Func, 0, "func(s int, n int) (err error)"},
+ {"Listxattr", Func, 1, "func(path string, dest []byte) (sz int, err error)"},
+ {"LoadCancelIoEx", Func, 1, ""},
+ {"LoadConnectEx", Func, 1, ""},
+ {"LoadCreateSymbolicLink", Func, 4, ""},
+ {"LoadDLL", Func, 0, ""},
+ {"LoadGetAddrInfo", Func, 1, ""},
+ {"LoadLibrary", Func, 0, ""},
+ {"LoadSetFileCompletionNotificationModes", Func, 2, ""},
+ {"LocalFree", Func, 0, ""},
+ {"Log2phys_t", Type, 0, ""},
+ {"Log2phys_t.Contigbytes", Field, 0, ""},
+ {"Log2phys_t.Devoffset", Field, 0, ""},
+ {"Log2phys_t.Flags", Field, 0, ""},
+ {"LookupAccountName", Func, 0, ""},
+ {"LookupAccountSid", Func, 0, ""},
+ {"LookupSID", Func, 0, ""},
+ {"LsfJump", Func, 0, "func(code int, k int, jt int, jf int) *SockFilter"},
+ {"LsfSocket", Func, 0, "func(ifindex int, proto int) (int, error)"},
+ {"LsfStmt", Func, 0, "func(code int, k int) *SockFilter"},
+ {"Lstat", Func, 0, "func(path string, stat *Stat_t) (err error)"},
+ {"MADV_AUTOSYNC", Const, 1, ""},
+ {"MADV_CAN_REUSE", Const, 0, ""},
+ {"MADV_CORE", Const, 1, ""},
+ {"MADV_DOFORK", Const, 0, ""},
+ {"MADV_DONTFORK", Const, 0, ""},
+ {"MADV_DONTNEED", Const, 0, ""},
+ {"MADV_FREE", Const, 0, ""},
+ {"MADV_FREE_REUSABLE", Const, 0, ""},
+ {"MADV_FREE_REUSE", Const, 0, ""},
+ {"MADV_HUGEPAGE", Const, 0, ""},
+ {"MADV_HWPOISON", Const, 0, ""},
+ {"MADV_MERGEABLE", Const, 0, ""},
+ {"MADV_NOCORE", Const, 1, ""},
+ {"MADV_NOHUGEPAGE", Const, 0, ""},
+ {"MADV_NORMAL", Const, 0, ""},
+ {"MADV_NOSYNC", Const, 1, ""},
+ {"MADV_PROTECT", Const, 1, ""},
+ {"MADV_RANDOM", Const, 0, ""},
+ {"MADV_REMOVE", Const, 0, ""},
+ {"MADV_SEQUENTIAL", Const, 0, ""},
+ {"MADV_SPACEAVAIL", Const, 3, ""},
+ {"MADV_UNMERGEABLE", Const, 0, ""},
+ {"MADV_WILLNEED", Const, 0, ""},
+ {"MADV_ZERO_WIRED_PAGES", Const, 0, ""},
+ {"MAP_32BIT", Const, 0, ""},
+ {"MAP_ALIGNED_SUPER", Const, 3, ""},
+ {"MAP_ALIGNMENT_16MB", Const, 3, ""},
+ {"MAP_ALIGNMENT_1TB", Const, 3, ""},
+ {"MAP_ALIGNMENT_256TB", Const, 3, ""},
+ {"MAP_ALIGNMENT_4GB", Const, 3, ""},
+ {"MAP_ALIGNMENT_64KB", Const, 3, ""},
+ {"MAP_ALIGNMENT_64PB", Const, 3, ""},
+ {"MAP_ALIGNMENT_MASK", Const, 3, ""},
+ {"MAP_ALIGNMENT_SHIFT", Const, 3, ""},
+ {"MAP_ANON", Const, 0, ""},
+ {"MAP_ANONYMOUS", Const, 0, ""},
+ {"MAP_COPY", Const, 0, ""},
+ {"MAP_DENYWRITE", Const, 0, ""},
+ {"MAP_EXECUTABLE", Const, 0, ""},
+ {"MAP_FILE", Const, 0, ""},
+ {"MAP_FIXED", Const, 0, ""},
+ {"MAP_FLAGMASK", Const, 3, ""},
+ {"MAP_GROWSDOWN", Const, 0, ""},
+ {"MAP_HASSEMAPHORE", Const, 0, ""},
+ {"MAP_HUGETLB", Const, 0, ""},
+ {"MAP_INHERIT", Const, 3, ""},
+ {"MAP_INHERIT_COPY", Const, 3, ""},
+ {"MAP_INHERIT_DEFAULT", Const, 3, ""},
+ {"MAP_INHERIT_DONATE_COPY", Const, 3, ""},
+ {"MAP_INHERIT_NONE", Const, 3, ""},
+ {"MAP_INHERIT_SHARE", Const, 3, ""},
+ {"MAP_JIT", Const, 0, ""},
+ {"MAP_LOCKED", Const, 0, ""},
+ {"MAP_NOCACHE", Const, 0, ""},
+ {"MAP_NOCORE", Const, 1, ""},
+ {"MAP_NOEXTEND", Const, 0, ""},
+ {"MAP_NONBLOCK", Const, 0, ""},
+ {"MAP_NORESERVE", Const, 0, ""},
+ {"MAP_NOSYNC", Const, 1, ""},
+ {"MAP_POPULATE", Const, 0, ""},
+ {"MAP_PREFAULT_READ", Const, 1, ""},
+ {"MAP_PRIVATE", Const, 0, ""},
+ {"MAP_RENAME", Const, 0, ""},
+ {"MAP_RESERVED0080", Const, 0, ""},
+ {"MAP_RESERVED0100", Const, 1, ""},
+ {"MAP_SHARED", Const, 0, ""},
+ {"MAP_STACK", Const, 0, ""},
+ {"MAP_TRYFIXED", Const, 3, ""},
+ {"MAP_TYPE", Const, 0, ""},
+ {"MAP_WIRED", Const, 3, ""},
+ {"MAXIMUM_REPARSE_DATA_BUFFER_SIZE", Const, 4, ""},
+ {"MAXLEN_IFDESCR", Const, 0, ""},
+ {"MAXLEN_PHYSADDR", Const, 0, ""},
+ {"MAX_ADAPTER_ADDRESS_LENGTH", Const, 0, ""},
+ {"MAX_ADAPTER_DESCRIPTION_LENGTH", Const, 0, ""},
+ {"MAX_ADAPTER_NAME_LENGTH", Const, 0, ""},
+ {"MAX_COMPUTERNAME_LENGTH", Const, 0, ""},
+ {"MAX_INTERFACE_NAME_LEN", Const, 0, ""},
+ {"MAX_LONG_PATH", Const, 0, ""},
+ {"MAX_PATH", Const, 0, ""},
+ {"MAX_PROTOCOL_CHAIN", Const, 2, ""},
+ {"MCL_CURRENT", Const, 0, ""},
+ {"MCL_FUTURE", Const, 0, ""},
+ {"MNT_DETACH", Const, 0, ""},
+ {"MNT_EXPIRE", Const, 0, ""},
+ {"MNT_FORCE", Const, 0, ""},
+ {"MSG_BCAST", Const, 1, ""},
+ {"MSG_CMSG_CLOEXEC", Const, 0, ""},
+ {"MSG_COMPAT", Const, 0, ""},
+ {"MSG_CONFIRM", Const, 0, ""},
+ {"MSG_CONTROLMBUF", Const, 1, ""},
+ {"MSG_CTRUNC", Const, 0, ""},
+ {"MSG_DONTROUTE", Const, 0, ""},
+ {"MSG_DONTWAIT", Const, 0, ""},
+ {"MSG_EOF", Const, 0, ""},
+ {"MSG_EOR", Const, 0, ""},
+ {"MSG_ERRQUEUE", Const, 0, ""},
+ {"MSG_FASTOPEN", Const, 1, ""},
+ {"MSG_FIN", Const, 0, ""},
+ {"MSG_FLUSH", Const, 0, ""},
+ {"MSG_HAVEMORE", Const, 0, ""},
+ {"MSG_HOLD", Const, 0, ""},
+ {"MSG_IOVUSRSPACE", Const, 1, ""},
+ {"MSG_LENUSRSPACE", Const, 1, ""},
+ {"MSG_MCAST", Const, 1, ""},
+ {"MSG_MORE", Const, 0, ""},
+ {"MSG_NAMEMBUF", Const, 1, ""},
+ {"MSG_NBIO", Const, 0, ""},
+ {"MSG_NEEDSA", Const, 0, ""},
+ {"MSG_NOSIGNAL", Const, 0, ""},
+ {"MSG_NOTIFICATION", Const, 0, ""},
+ {"MSG_OOB", Const, 0, ""},
+ {"MSG_PEEK", Const, 0, ""},
+ {"MSG_PROXY", Const, 0, ""},
+ {"MSG_RCVMORE", Const, 0, ""},
+ {"MSG_RST", Const, 0, ""},
+ {"MSG_SEND", Const, 0, ""},
+ {"MSG_SYN", Const, 0, ""},
+ {"MSG_TRUNC", Const, 0, ""},
+ {"MSG_TRYHARD", Const, 0, ""},
+ {"MSG_USERFLAGS", Const, 1, ""},
+ {"MSG_WAITALL", Const, 0, ""},
+ {"MSG_WAITFORONE", Const, 0, ""},
+ {"MSG_WAITSTREAM", Const, 0, ""},
+ {"MS_ACTIVE", Const, 0, ""},
+ {"MS_ASYNC", Const, 0, ""},
+ {"MS_BIND", Const, 0, ""},
+ {"MS_DEACTIVATE", Const, 0, ""},
+ {"MS_DIRSYNC", Const, 0, ""},
+ {"MS_INVALIDATE", Const, 0, ""},
+ {"MS_I_VERSION", Const, 0, ""},
+ {"MS_KERNMOUNT", Const, 0, ""},
+ {"MS_KILLPAGES", Const, 0, ""},
+ {"MS_MANDLOCK", Const, 0, ""},
+ {"MS_MGC_MSK", Const, 0, ""},
+ {"MS_MGC_VAL", Const, 0, ""},
+ {"MS_MOVE", Const, 0, ""},
+ {"MS_NOATIME", Const, 0, ""},
+ {"MS_NODEV", Const, 0, ""},
+ {"MS_NODIRATIME", Const, 0, ""},
+ {"MS_NOEXEC", Const, 0, ""},
+ {"MS_NOSUID", Const, 0, ""},
+ {"MS_NOUSER", Const, 0, ""},
+ {"MS_POSIXACL", Const, 0, ""},
+ {"MS_PRIVATE", Const, 0, ""},
+ {"MS_RDONLY", Const, 0, ""},
+ {"MS_REC", Const, 0, ""},
+ {"MS_RELATIME", Const, 0, ""},
+ {"MS_REMOUNT", Const, 0, ""},
+ {"MS_RMT_MASK", Const, 0, ""},
+ {"MS_SHARED", Const, 0, ""},
+ {"MS_SILENT", Const, 0, ""},
+ {"MS_SLAVE", Const, 0, ""},
+ {"MS_STRICTATIME", Const, 0, ""},
+ {"MS_SYNC", Const, 0, ""},
+ {"MS_SYNCHRONOUS", Const, 0, ""},
+ {"MS_UNBINDABLE", Const, 0, ""},
+ {"Madvise", Func, 0, "func(b []byte, advice int) (err error)"},
+ {"MapViewOfFile", Func, 0, ""},
+ {"MaxTokenInfoClass", Const, 0, ""},
+ {"Mclpool", Type, 2, ""},
+ {"Mclpool.Alive", Field, 2, ""},
+ {"Mclpool.Cwm", Field, 2, ""},
+ {"Mclpool.Grown", Field, 2, ""},
+ {"Mclpool.Hwm", Field, 2, ""},
+ {"Mclpool.Lwm", Field, 2, ""},
+ {"MibIfRow", Type, 0, ""},
+ {"MibIfRow.AdminStatus", Field, 0, ""},
+ {"MibIfRow.Descr", Field, 0, ""},
+ {"MibIfRow.DescrLen", Field, 0, ""},
+ {"MibIfRow.InDiscards", Field, 0, ""},
+ {"MibIfRow.InErrors", Field, 0, ""},
+ {"MibIfRow.InNUcastPkts", Field, 0, ""},
+ {"MibIfRow.InOctets", Field, 0, ""},
+ {"MibIfRow.InUcastPkts", Field, 0, ""},
+ {"MibIfRow.InUnknownProtos", Field, 0, ""},
+ {"MibIfRow.Index", Field, 0, ""},
+ {"MibIfRow.LastChange", Field, 0, ""},
+ {"MibIfRow.Mtu", Field, 0, ""},
+ {"MibIfRow.Name", Field, 0, ""},
+ {"MibIfRow.OperStatus", Field, 0, ""},
+ {"MibIfRow.OutDiscards", Field, 0, ""},
+ {"MibIfRow.OutErrors", Field, 0, ""},
+ {"MibIfRow.OutNUcastPkts", Field, 0, ""},
+ {"MibIfRow.OutOctets", Field, 0, ""},
+ {"MibIfRow.OutQLen", Field, 0, ""},
+ {"MibIfRow.OutUcastPkts", Field, 0, ""},
+ {"MibIfRow.PhysAddr", Field, 0, ""},
+ {"MibIfRow.PhysAddrLen", Field, 0, ""},
+ {"MibIfRow.Speed", Field, 0, ""},
+ {"MibIfRow.Type", Field, 0, ""},
+ {"Mkdir", Func, 0, "func(path string, mode uint32) (err error)"},
+ {"Mkdirat", Func, 0, "func(dirfd int, path string, mode uint32) (err error)"},
+ {"Mkfifo", Func, 0, "func(path string, mode uint32) (err error)"},
+ {"Mknod", Func, 0, "func(path string, mode uint32, dev int) (err error)"},
+ {"Mknodat", Func, 0, "func(dirfd int, path string, mode uint32, dev int) (err error)"},
+ {"Mlock", Func, 0, "func(b []byte) (err error)"},
+ {"Mlockall", Func, 0, "func(flags int) (err error)"},
+ {"Mmap", Func, 0, "func(fd int, offset int64, length int, prot int, flags int) (data []byte, err error)"},
+ {"Mount", Func, 0, "func(source string, target string, fstype string, flags uintptr, data string) (err error)"},
+ {"MoveFile", Func, 0, ""},
+ {"Mprotect", Func, 0, "func(b []byte, prot int) (err error)"},
+ {"Msghdr", Type, 0, ""},
+ {"Msghdr.Control", Field, 0, ""},
+ {"Msghdr.Controllen", Field, 0, ""},
+ {"Msghdr.Flags", Field, 0, ""},
+ {"Msghdr.Iov", Field, 0, ""},
+ {"Msghdr.Iovlen", Field, 0, ""},
+ {"Msghdr.Name", Field, 0, ""},
+ {"Msghdr.Namelen", Field, 0, ""},
+ {"Msghdr.Pad_cgo_0", Field, 0, ""},
+ {"Msghdr.Pad_cgo_1", Field, 0, ""},
+ {"Munlock", Func, 0, "func(b []byte) (err error)"},
+ {"Munlockall", Func, 0, "func() (err error)"},
+ {"Munmap", Func, 0, "func(b []byte) (err error)"},
+ {"MustLoadDLL", Func, 0, ""},
+ {"NAME_MAX", Const, 0, ""},
+ {"NETLINK_ADD_MEMBERSHIP", Const, 0, ""},
+ {"NETLINK_AUDIT", Const, 0, ""},
+ {"NETLINK_BROADCAST_ERROR", Const, 0, ""},
+ {"NETLINK_CONNECTOR", Const, 0, ""},
+ {"NETLINK_DNRTMSG", Const, 0, ""},
+ {"NETLINK_DROP_MEMBERSHIP", Const, 0, ""},
+ {"NETLINK_ECRYPTFS", Const, 0, ""},
+ {"NETLINK_FIB_LOOKUP", Const, 0, ""},
+ {"NETLINK_FIREWALL", Const, 0, ""},
+ {"NETLINK_GENERIC", Const, 0, ""},
+ {"NETLINK_INET_DIAG", Const, 0, ""},
+ {"NETLINK_IP6_FW", Const, 0, ""},
+ {"NETLINK_ISCSI", Const, 0, ""},
+ {"NETLINK_KOBJECT_UEVENT", Const, 0, ""},
+ {"NETLINK_NETFILTER", Const, 0, ""},
+ {"NETLINK_NFLOG", Const, 0, ""},
+ {"NETLINK_NO_ENOBUFS", Const, 0, ""},
+ {"NETLINK_PKTINFO", Const, 0, ""},
+ {"NETLINK_RDMA", Const, 0, ""},
+ {"NETLINK_ROUTE", Const, 0, ""},
+ {"NETLINK_SCSITRANSPORT", Const, 0, ""},
+ {"NETLINK_SELINUX", Const, 0, ""},
+ {"NETLINK_UNUSED", Const, 0, ""},
+ {"NETLINK_USERSOCK", Const, 0, ""},
+ {"NETLINK_XFRM", Const, 0, ""},
+ {"NET_RT_DUMP", Const, 0, ""},
+ {"NET_RT_DUMP2", Const, 0, ""},
+ {"NET_RT_FLAGS", Const, 0, ""},
+ {"NET_RT_IFLIST", Const, 0, ""},
+ {"NET_RT_IFLIST2", Const, 0, ""},
+ {"NET_RT_IFLISTL", Const, 1, ""},
+ {"NET_RT_IFMALIST", Const, 0, ""},
+ {"NET_RT_MAXID", Const, 0, ""},
+ {"NET_RT_OIFLIST", Const, 1, ""},
+ {"NET_RT_OOIFLIST", Const, 1, ""},
+ {"NET_RT_STAT", Const, 0, ""},
+ {"NET_RT_STATS", Const, 1, ""},
+ {"NET_RT_TABLE", Const, 1, ""},
+ {"NET_RT_TRASH", Const, 0, ""},
+ {"NLA_ALIGNTO", Const, 0, ""},
+ {"NLA_F_NESTED", Const, 0, ""},
+ {"NLA_F_NET_BYTEORDER", Const, 0, ""},
+ {"NLA_HDRLEN", Const, 0, ""},
+ {"NLMSG_ALIGNTO", Const, 0, ""},
+ {"NLMSG_DONE", Const, 0, ""},
+ {"NLMSG_ERROR", Const, 0, ""},
+ {"NLMSG_HDRLEN", Const, 0, ""},
+ {"NLMSG_MIN_TYPE", Const, 0, ""},
+ {"NLMSG_NOOP", Const, 0, ""},
+ {"NLMSG_OVERRUN", Const, 0, ""},
+ {"NLM_F_ACK", Const, 0, ""},
+ {"NLM_F_APPEND", Const, 0, ""},
+ {"NLM_F_ATOMIC", Const, 0, ""},
+ {"NLM_F_CREATE", Const, 0, ""},
+ {"NLM_F_DUMP", Const, 0, ""},
+ {"NLM_F_ECHO", Const, 0, ""},
+ {"NLM_F_EXCL", Const, 0, ""},
+ {"NLM_F_MATCH", Const, 0, ""},
+ {"NLM_F_MULTI", Const, 0, ""},
+ {"NLM_F_REPLACE", Const, 0, ""},
+ {"NLM_F_REQUEST", Const, 0, ""},
+ {"NLM_F_ROOT", Const, 0, ""},
+ {"NOFLSH", Const, 0, ""},
+ {"NOTE_ABSOLUTE", Const, 0, ""},
+ {"NOTE_ATTRIB", Const, 0, ""},
+ {"NOTE_BACKGROUND", Const, 16, ""},
+ {"NOTE_CHILD", Const, 0, ""},
+ {"NOTE_CRITICAL", Const, 16, ""},
+ {"NOTE_DELETE", Const, 0, ""},
+ {"NOTE_EOF", Const, 1, ""},
+ {"NOTE_EXEC", Const, 0, ""},
+ {"NOTE_EXIT", Const, 0, ""},
+ {"NOTE_EXITSTATUS", Const, 0, ""},
+ {"NOTE_EXIT_CSERROR", Const, 16, ""},
+ {"NOTE_EXIT_DECRYPTFAIL", Const, 16, ""},
+ {"NOTE_EXIT_DETAIL", Const, 16, ""},
+ {"NOTE_EXIT_DETAIL_MASK", Const, 16, ""},
+ {"NOTE_EXIT_MEMORY", Const, 16, ""},
+ {"NOTE_EXIT_REPARENTED", Const, 16, ""},
+ {"NOTE_EXTEND", Const, 0, ""},
+ {"NOTE_FFAND", Const, 0, ""},
+ {"NOTE_FFCOPY", Const, 0, ""},
+ {"NOTE_FFCTRLMASK", Const, 0, ""},
+ {"NOTE_FFLAGSMASK", Const, 0, ""},
+ {"NOTE_FFNOP", Const, 0, ""},
+ {"NOTE_FFOR", Const, 0, ""},
+ {"NOTE_FORK", Const, 0, ""},
+ {"NOTE_LEEWAY", Const, 16, ""},
+ {"NOTE_LINK", Const, 0, ""},
+ {"NOTE_LOWAT", Const, 0, ""},
+ {"NOTE_NONE", Const, 0, ""},
+ {"NOTE_NSECONDS", Const, 0, ""},
+ {"NOTE_PCTRLMASK", Const, 0, ""},
+ {"NOTE_PDATAMASK", Const, 0, ""},
+ {"NOTE_REAP", Const, 0, ""},
+ {"NOTE_RENAME", Const, 0, ""},
+ {"NOTE_RESOURCEEND", Const, 0, ""},
+ {"NOTE_REVOKE", Const, 0, ""},
+ {"NOTE_SECONDS", Const, 0, ""},
+ {"NOTE_SIGNAL", Const, 0, ""},
+ {"NOTE_TRACK", Const, 0, ""},
+ {"NOTE_TRACKERR", Const, 0, ""},
+ {"NOTE_TRIGGER", Const, 0, ""},
+ {"NOTE_TRUNCATE", Const, 1, ""},
+ {"NOTE_USECONDS", Const, 0, ""},
+ {"NOTE_VM_ERROR", Const, 0, ""},
+ {"NOTE_VM_PRESSURE", Const, 0, ""},
+ {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", Const, 0, ""},
+ {"NOTE_VM_PRESSURE_TERMINATE", Const, 0, ""},
+ {"NOTE_WRITE", Const, 0, ""},
+ {"NameCanonical", Const, 0, ""},
+ {"NameCanonicalEx", Const, 0, ""},
+ {"NameDisplay", Const, 0, ""},
+ {"NameDnsDomain", Const, 0, ""},
+ {"NameFullyQualifiedDN", Const, 0, ""},
+ {"NameSamCompatible", Const, 0, ""},
+ {"NameServicePrincipal", Const, 0, ""},
+ {"NameUniqueId", Const, 0, ""},
+ {"NameUnknown", Const, 0, ""},
+ {"NameUserPrincipal", Const, 0, ""},
+ {"Nanosleep", Func, 0, "func(time *Timespec, leftover *Timespec) (err error)"},
+ {"NetApiBufferFree", Func, 0, ""},
+ {"NetGetJoinInformation", Func, 2, ""},
+ {"NetSetupDomainName", Const, 2, ""},
+ {"NetSetupUnjoined", Const, 2, ""},
+ {"NetSetupUnknownStatus", Const, 2, ""},
+ {"NetSetupWorkgroupName", Const, 2, ""},
+ {"NetUserGetInfo", Func, 0, ""},
+ {"NetlinkMessage", Type, 0, ""},
+ {"NetlinkMessage.Data", Field, 0, ""},
+ {"NetlinkMessage.Header", Field, 0, ""},
+ {"NetlinkRIB", Func, 0, "func(proto int, family int) ([]byte, error)"},
+ {"NetlinkRouteAttr", Type, 0, ""},
+ {"NetlinkRouteAttr.Attr", Field, 0, ""},
+ {"NetlinkRouteAttr.Value", Field, 0, ""},
+ {"NetlinkRouteRequest", Type, 0, ""},
+ {"NetlinkRouteRequest.Data", Field, 0, ""},
+ {"NetlinkRouteRequest.Header", Field, 0, ""},
+ {"NewCallback", Func, 0, ""},
+ {"NewCallbackCDecl", Func, 3, ""},
+ {"NewLazyDLL", Func, 0, ""},
+ {"NlAttr", Type, 0, ""},
+ {"NlAttr.Len", Field, 0, ""},
+ {"NlAttr.Type", Field, 0, ""},
+ {"NlMsgerr", Type, 0, ""},
+ {"NlMsgerr.Error", Field, 0, ""},
+ {"NlMsgerr.Msg", Field, 0, ""},
+ {"NlMsghdr", Type, 0, ""},
+ {"NlMsghdr.Flags", Field, 0, ""},
+ {"NlMsghdr.Len", Field, 0, ""},
+ {"NlMsghdr.Pid", Field, 0, ""},
+ {"NlMsghdr.Seq", Field, 0, ""},
+ {"NlMsghdr.Type", Field, 0, ""},
+ {"NsecToFiletime", Func, 0, ""},
+ {"NsecToTimespec", Func, 0, "func(nsec int64) Timespec"},
+ {"NsecToTimeval", Func, 0, "func(nsec int64) Timeval"},
+ {"Ntohs", Func, 0, ""},
+ {"OCRNL", Const, 0, ""},
+ {"OFDEL", Const, 0, ""},
+ {"OFILL", Const, 0, ""},
+ {"OFIOGETBMAP", Const, 1, ""},
+ {"OID_PKIX_KP_SERVER_AUTH", Var, 0, ""},
+ {"OID_SERVER_GATED_CRYPTO", Var, 0, ""},
+ {"OID_SGC_NETSCAPE", Var, 0, ""},
+ {"OLCUC", Const, 0, ""},
+ {"ONLCR", Const, 0, ""},
+ {"ONLRET", Const, 0, ""},
+ {"ONOCR", Const, 0, ""},
+ {"ONOEOT", Const, 1, ""},
+ {"OPEN_ALWAYS", Const, 0, ""},
+ {"OPEN_EXISTING", Const, 0, ""},
+ {"OPOST", Const, 0, ""},
+ {"O_ACCMODE", Const, 0, ""},
+ {"O_ALERT", Const, 0, ""},
+ {"O_ALT_IO", Const, 1, ""},
+ {"O_APPEND", Const, 0, ""},
+ {"O_ASYNC", Const, 0, ""},
+ {"O_CLOEXEC", Const, 0, ""},
+ {"O_CREAT", Const, 0, ""},
+ {"O_DIRECT", Const, 0, ""},
+ {"O_DIRECTORY", Const, 0, ""},
+ {"O_DP_GETRAWENCRYPTED", Const, 16, ""},
+ {"O_DSYNC", Const, 0, ""},
+ {"O_EVTONLY", Const, 0, ""},
+ {"O_EXCL", Const, 0, ""},
+ {"O_EXEC", Const, 0, ""},
+ {"O_EXLOCK", Const, 0, ""},
+ {"O_FSYNC", Const, 0, ""},
+ {"O_LARGEFILE", Const, 0, ""},
+ {"O_NDELAY", Const, 0, ""},
+ {"O_NOATIME", Const, 0, ""},
+ {"O_NOCTTY", Const, 0, ""},
+ {"O_NOFOLLOW", Const, 0, ""},
+ {"O_NONBLOCK", Const, 0, ""},
+ {"O_NOSIGPIPE", Const, 1, ""},
+ {"O_POPUP", Const, 0, ""},
+ {"O_RDONLY", Const, 0, ""},
+ {"O_RDWR", Const, 0, ""},
+ {"O_RSYNC", Const, 0, ""},
+ {"O_SHLOCK", Const, 0, ""},
+ {"O_SYMLINK", Const, 0, ""},
+ {"O_SYNC", Const, 0, ""},
+ {"O_TRUNC", Const, 0, ""},
+ {"O_TTY_INIT", Const, 0, ""},
+ {"O_WRONLY", Const, 0, ""},
+ {"Open", Func, 0, "func(path string, mode int, perm uint32) (fd int, err error)"},
+ {"OpenCurrentProcessToken", Func, 0, ""},
+ {"OpenProcess", Func, 0, ""},
+ {"OpenProcessToken", Func, 0, ""},
+ {"Openat", Func, 0, "func(dirfd int, path string, flags int, mode uint32) (fd int, err error)"},
+ {"Overlapped", Type, 0, ""},
+ {"Overlapped.HEvent", Field, 0, ""},
+ {"Overlapped.Internal", Field, 0, ""},
+ {"Overlapped.InternalHigh", Field, 0, ""},
+ {"Overlapped.Offset", Field, 0, ""},
+ {"Overlapped.OffsetHigh", Field, 0, ""},
+ {"PACKET_ADD_MEMBERSHIP", Const, 0, ""},
+ {"PACKET_BROADCAST", Const, 0, ""},
+ {"PACKET_DROP_MEMBERSHIP", Const, 0, ""},
+ {"PACKET_FASTROUTE", Const, 0, ""},
+ {"PACKET_HOST", Const, 0, ""},
+ {"PACKET_LOOPBACK", Const, 0, ""},
+ {"PACKET_MR_ALLMULTI", Const, 0, ""},
+ {"PACKET_MR_MULTICAST", Const, 0, ""},
+ {"PACKET_MR_PROMISC", Const, 0, ""},
+ {"PACKET_MULTICAST", Const, 0, ""},
+ {"PACKET_OTHERHOST", Const, 0, ""},
+ {"PACKET_OUTGOING", Const, 0, ""},
+ {"PACKET_RECV_OUTPUT", Const, 0, ""},
+ {"PACKET_RX_RING", Const, 0, ""},
+ {"PACKET_STATISTICS", Const, 0, ""},
+ {"PAGE_EXECUTE_READ", Const, 0, ""},
+ {"PAGE_EXECUTE_READWRITE", Const, 0, ""},
+ {"PAGE_EXECUTE_WRITECOPY", Const, 0, ""},
+ {"PAGE_READONLY", Const, 0, ""},
+ {"PAGE_READWRITE", Const, 0, ""},
+ {"PAGE_WRITECOPY", Const, 0, ""},
+ {"PARENB", Const, 0, ""},
+ {"PARMRK", Const, 0, ""},
+ {"PARODD", Const, 0, ""},
+ {"PENDIN", Const, 0, ""},
+ {"PFL_HIDDEN", Const, 2, ""},
+ {"PFL_MATCHES_PROTOCOL_ZERO", Const, 2, ""},
+ {"PFL_MULTIPLE_PROTO_ENTRIES", Const, 2, ""},
+ {"PFL_NETWORKDIRECT_PROVIDER", Const, 2, ""},
+ {"PFL_RECOMMENDED_PROTO_ENTRY", Const, 2, ""},
+ {"PF_FLUSH", Const, 1, ""},
+ {"PKCS_7_ASN_ENCODING", Const, 0, ""},
+ {"PMC5_PIPELINE_FLUSH", Const, 1, ""},
+ {"PRIO_PGRP", Const, 2, ""},
+ {"PRIO_PROCESS", Const, 2, ""},
+ {"PRIO_USER", Const, 2, ""},
+ {"PRI_IOFLUSH", Const, 1, ""},
+ {"PROCESS_QUERY_INFORMATION", Const, 0, ""},
+ {"PROCESS_TERMINATE", Const, 2, ""},
+ {"PROT_EXEC", Const, 0, ""},
+ {"PROT_GROWSDOWN", Const, 0, ""},
+ {"PROT_GROWSUP", Const, 0, ""},
+ {"PROT_NONE", Const, 0, ""},
+ {"PROT_READ", Const, 0, ""},
+ {"PROT_WRITE", Const, 0, ""},
+ {"PROV_DH_SCHANNEL", Const, 0, ""},
+ {"PROV_DSS", Const, 0, ""},
+ {"PROV_DSS_DH", Const, 0, ""},
+ {"PROV_EC_ECDSA_FULL", Const, 0, ""},
+ {"PROV_EC_ECDSA_SIG", Const, 0, ""},
+ {"PROV_EC_ECNRA_FULL", Const, 0, ""},
+ {"PROV_EC_ECNRA_SIG", Const, 0, ""},
+ {"PROV_FORTEZZA", Const, 0, ""},
+ {"PROV_INTEL_SEC", Const, 0, ""},
+ {"PROV_MS_EXCHANGE", Const, 0, ""},
+ {"PROV_REPLACE_OWF", Const, 0, ""},
+ {"PROV_RNG", Const, 0, ""},
+ {"PROV_RSA_AES", Const, 0, ""},
+ {"PROV_RSA_FULL", Const, 0, ""},
+ {"PROV_RSA_SCHANNEL", Const, 0, ""},
+ {"PROV_RSA_SIG", Const, 0, ""},
+ {"PROV_SPYRUS_LYNKS", Const, 0, ""},
+ {"PROV_SSL", Const, 0, ""},
+ {"PR_CAPBSET_DROP", Const, 0, ""},
+ {"PR_CAPBSET_READ", Const, 0, ""},
+ {"PR_CLEAR_SECCOMP_FILTER", Const, 0, ""},
+ {"PR_ENDIAN_BIG", Const, 0, ""},
+ {"PR_ENDIAN_LITTLE", Const, 0, ""},
+ {"PR_ENDIAN_PPC_LITTLE", Const, 0, ""},
+ {"PR_FPEMU_NOPRINT", Const, 0, ""},
+ {"PR_FPEMU_SIGFPE", Const, 0, ""},
+ {"PR_FP_EXC_ASYNC", Const, 0, ""},
+ {"PR_FP_EXC_DISABLED", Const, 0, ""},
+ {"PR_FP_EXC_DIV", Const, 0, ""},
+ {"PR_FP_EXC_INV", Const, 0, ""},
+ {"PR_FP_EXC_NONRECOV", Const, 0, ""},
+ {"PR_FP_EXC_OVF", Const, 0, ""},
+ {"PR_FP_EXC_PRECISE", Const, 0, ""},
+ {"PR_FP_EXC_RES", Const, 0, ""},
+ {"PR_FP_EXC_SW_ENABLE", Const, 0, ""},
+ {"PR_FP_EXC_UND", Const, 0, ""},
+ {"PR_GET_DUMPABLE", Const, 0, ""},
+ {"PR_GET_ENDIAN", Const, 0, ""},
+ {"PR_GET_FPEMU", Const, 0, ""},
+ {"PR_GET_FPEXC", Const, 0, ""},
+ {"PR_GET_KEEPCAPS", Const, 0, ""},
+ {"PR_GET_NAME", Const, 0, ""},
+ {"PR_GET_PDEATHSIG", Const, 0, ""},
+ {"PR_GET_SECCOMP", Const, 0, ""},
+ {"PR_GET_SECCOMP_FILTER", Const, 0, ""},
+ {"PR_GET_SECUREBITS", Const, 0, ""},
+ {"PR_GET_TIMERSLACK", Const, 0, ""},
+ {"PR_GET_TIMING", Const, 0, ""},
+ {"PR_GET_TSC", Const, 0, ""},
+ {"PR_GET_UNALIGN", Const, 0, ""},
+ {"PR_MCE_KILL", Const, 0, ""},
+ {"PR_MCE_KILL_CLEAR", Const, 0, ""},
+ {"PR_MCE_KILL_DEFAULT", Const, 0, ""},
+ {"PR_MCE_KILL_EARLY", Const, 0, ""},
+ {"PR_MCE_KILL_GET", Const, 0, ""},
+ {"PR_MCE_KILL_LATE", Const, 0, ""},
+ {"PR_MCE_KILL_SET", Const, 0, ""},
+ {"PR_SECCOMP_FILTER_EVENT", Const, 0, ""},
+ {"PR_SECCOMP_FILTER_SYSCALL", Const, 0, ""},
+ {"PR_SET_DUMPABLE", Const, 0, ""},
+ {"PR_SET_ENDIAN", Const, 0, ""},
+ {"PR_SET_FPEMU", Const, 0, ""},
+ {"PR_SET_FPEXC", Const, 0, ""},
+ {"PR_SET_KEEPCAPS", Const, 0, ""},
+ {"PR_SET_NAME", Const, 0, ""},
+ {"PR_SET_PDEATHSIG", Const, 0, ""},
+ {"PR_SET_PTRACER", Const, 0, ""},
+ {"PR_SET_SECCOMP", Const, 0, ""},
+ {"PR_SET_SECCOMP_FILTER", Const, 0, ""},
+ {"PR_SET_SECUREBITS", Const, 0, ""},
+ {"PR_SET_TIMERSLACK", Const, 0, ""},
+ {"PR_SET_TIMING", Const, 0, ""},
+ {"PR_SET_TSC", Const, 0, ""},
+ {"PR_SET_UNALIGN", Const, 0, ""},
+ {"PR_TASK_PERF_EVENTS_DISABLE", Const, 0, ""},
+ {"PR_TASK_PERF_EVENTS_ENABLE", Const, 0, ""},
+ {"PR_TIMING_STATISTICAL", Const, 0, ""},
+ {"PR_TIMING_TIMESTAMP", Const, 0, ""},
+ {"PR_TSC_ENABLE", Const, 0, ""},
+ {"PR_TSC_SIGSEGV", Const, 0, ""},
+ {"PR_UNALIGN_NOPRINT", Const, 0, ""},
+ {"PR_UNALIGN_SIGBUS", Const, 0, ""},
+ {"PTRACE_ARCH_PRCTL", Const, 0, ""},
+ {"PTRACE_ATTACH", Const, 0, ""},
+ {"PTRACE_CONT", Const, 0, ""},
+ {"PTRACE_DETACH", Const, 0, ""},
+ {"PTRACE_EVENT_CLONE", Const, 0, ""},
+ {"PTRACE_EVENT_EXEC", Const, 0, ""},
+ {"PTRACE_EVENT_EXIT", Const, 0, ""},
+ {"PTRACE_EVENT_FORK", Const, 0, ""},
+ {"PTRACE_EVENT_VFORK", Const, 0, ""},
+ {"PTRACE_EVENT_VFORK_DONE", Const, 0, ""},
+ {"PTRACE_GETCRUNCHREGS", Const, 0, ""},
+ {"PTRACE_GETEVENTMSG", Const, 0, ""},
+ {"PTRACE_GETFPREGS", Const, 0, ""},
+ {"PTRACE_GETFPXREGS", Const, 0, ""},
+ {"PTRACE_GETHBPREGS", Const, 0, ""},
+ {"PTRACE_GETREGS", Const, 0, ""},
+ {"PTRACE_GETREGSET", Const, 0, ""},
+ {"PTRACE_GETSIGINFO", Const, 0, ""},
+ {"PTRACE_GETVFPREGS", Const, 0, ""},
+ {"PTRACE_GETWMMXREGS", Const, 0, ""},
+ {"PTRACE_GET_THREAD_AREA", Const, 0, ""},
+ {"PTRACE_KILL", Const, 0, ""},
+ {"PTRACE_OLDSETOPTIONS", Const, 0, ""},
+ {"PTRACE_O_MASK", Const, 0, ""},
+ {"PTRACE_O_TRACECLONE", Const, 0, ""},
+ {"PTRACE_O_TRACEEXEC", Const, 0, ""},
+ {"PTRACE_O_TRACEEXIT", Const, 0, ""},
+ {"PTRACE_O_TRACEFORK", Const, 0, ""},
+ {"PTRACE_O_TRACESYSGOOD", Const, 0, ""},
+ {"PTRACE_O_TRACEVFORK", Const, 0, ""},
+ {"PTRACE_O_TRACEVFORKDONE", Const, 0, ""},
+ {"PTRACE_PEEKDATA", Const, 0, ""},
+ {"PTRACE_PEEKTEXT", Const, 0, ""},
+ {"PTRACE_PEEKUSR", Const, 0, ""},
+ {"PTRACE_POKEDATA", Const, 0, ""},
+ {"PTRACE_POKETEXT", Const, 0, ""},
+ {"PTRACE_POKEUSR", Const, 0, ""},
+ {"PTRACE_SETCRUNCHREGS", Const, 0, ""},
+ {"PTRACE_SETFPREGS", Const, 0, ""},
+ {"PTRACE_SETFPXREGS", Const, 0, ""},
+ {"PTRACE_SETHBPREGS", Const, 0, ""},
+ {"PTRACE_SETOPTIONS", Const, 0, ""},
+ {"PTRACE_SETREGS", Const, 0, ""},
+ {"PTRACE_SETREGSET", Const, 0, ""},
+ {"PTRACE_SETSIGINFO", Const, 0, ""},
+ {"PTRACE_SETVFPREGS", Const, 0, ""},
+ {"PTRACE_SETWMMXREGS", Const, 0, ""},
+ {"PTRACE_SET_SYSCALL", Const, 0, ""},
+ {"PTRACE_SET_THREAD_AREA", Const, 0, ""},
+ {"PTRACE_SINGLEBLOCK", Const, 0, ""},
+ {"PTRACE_SINGLESTEP", Const, 0, ""},
+ {"PTRACE_SYSCALL", Const, 0, ""},
+ {"PTRACE_SYSEMU", Const, 0, ""},
+ {"PTRACE_SYSEMU_SINGLESTEP", Const, 0, ""},
+ {"PTRACE_TRACEME", Const, 0, ""},
+ {"PT_ATTACH", Const, 0, ""},
+ {"PT_ATTACHEXC", Const, 0, ""},
+ {"PT_CONTINUE", Const, 0, ""},
+ {"PT_DATA_ADDR", Const, 0, ""},
+ {"PT_DENY_ATTACH", Const, 0, ""},
+ {"PT_DETACH", Const, 0, ""},
+ {"PT_FIRSTMACH", Const, 0, ""},
+ {"PT_FORCEQUOTA", Const, 0, ""},
+ {"PT_KILL", Const, 0, ""},
+ {"PT_MASK", Const, 1, ""},
+ {"PT_READ_D", Const, 0, ""},
+ {"PT_READ_I", Const, 0, ""},
+ {"PT_READ_U", Const, 0, ""},
+ {"PT_SIGEXC", Const, 0, ""},
+ {"PT_STEP", Const, 0, ""},
+ {"PT_TEXT_ADDR", Const, 0, ""},
+ {"PT_TEXT_END_ADDR", Const, 0, ""},
+ {"PT_THUPDATE", Const, 0, ""},
+ {"PT_TRACE_ME", Const, 0, ""},
+ {"PT_WRITE_D", Const, 0, ""},
+ {"PT_WRITE_I", Const, 0, ""},
+ {"PT_WRITE_U", Const, 0, ""},
+ {"ParseDirent", Func, 0, "func(buf []byte, max int, names []string) (consumed int, count int, newnames []string)"},
+ {"ParseNetlinkMessage", Func, 0, "func(b []byte) ([]NetlinkMessage, error)"},
+ {"ParseNetlinkRouteAttr", Func, 0, "func(m *NetlinkMessage) ([]NetlinkRouteAttr, error)"},
+ {"ParseRoutingMessage", Func, 0, ""},
+ {"ParseRoutingSockaddr", Func, 0, ""},
+ {"ParseSocketControlMessage", Func, 0, "func(b []byte) ([]SocketControlMessage, error)"},
+ {"ParseUnixCredentials", Func, 0, "func(m *SocketControlMessage) (*Ucred, error)"},
+ {"ParseUnixRights", Func, 0, "func(m *SocketControlMessage) ([]int, error)"},
+ {"PathMax", Const, 0, ""},
+ {"Pathconf", Func, 0, ""},
+ {"Pause", Func, 0, "func() (err error)"},
+ {"Pipe", Func, 0, "func(p []int) error"},
+ {"Pipe2", Func, 1, "func(p []int, flags int) error"},
+ {"PivotRoot", Func, 0, "func(newroot string, putold string) (err error)"},
+ {"Pointer", Type, 11, ""},
+ {"PostQueuedCompletionStatus", Func, 0, ""},
+ {"Pread", Func, 0, "func(fd int, p []byte, offset int64) (n int, err error)"},
+ {"Proc", Type, 0, ""},
+ {"Proc.Dll", Field, 0, ""},
+ {"Proc.Name", Field, 0, ""},
+ {"ProcAttr", Type, 0, ""},
+ {"ProcAttr.Dir", Field, 0, ""},
+ {"ProcAttr.Env", Field, 0, ""},
+ {"ProcAttr.Files", Field, 0, ""},
+ {"ProcAttr.Sys", Field, 0, ""},
+ {"Process32First", Func, 4, ""},
+ {"Process32Next", Func, 4, ""},
+ {"ProcessEntry32", Type, 4, ""},
+ {"ProcessEntry32.DefaultHeapID", Field, 4, ""},
+ {"ProcessEntry32.ExeFile", Field, 4, ""},
+ {"ProcessEntry32.Flags", Field, 4, ""},
+ {"ProcessEntry32.ModuleID", Field, 4, ""},
+ {"ProcessEntry32.ParentProcessID", Field, 4, ""},
+ {"ProcessEntry32.PriClassBase", Field, 4, ""},
+ {"ProcessEntry32.ProcessID", Field, 4, ""},
+ {"ProcessEntry32.Size", Field, 4, ""},
+ {"ProcessEntry32.Threads", Field, 4, ""},
+ {"ProcessEntry32.Usage", Field, 4, ""},
+ {"ProcessInformation", Type, 0, ""},
+ {"ProcessInformation.Process", Field, 0, ""},
+ {"ProcessInformation.ProcessId", Field, 0, ""},
+ {"ProcessInformation.Thread", Field, 0, ""},
+ {"ProcessInformation.ThreadId", Field, 0, ""},
+ {"Protoent", Type, 0, ""},
+ {"Protoent.Aliases", Field, 0, ""},
+ {"Protoent.Name", Field, 0, ""},
+ {"Protoent.Proto", Field, 0, ""},
+ {"PtraceAttach", Func, 0, "func(pid int) (err error)"},
+ {"PtraceCont", Func, 0, "func(pid int, signal int) (err error)"},
+ {"PtraceDetach", Func, 0, "func(pid int) (err error)"},
+ {"PtraceGetEventMsg", Func, 0, "func(pid int) (msg uint, err error)"},
+ {"PtraceGetRegs", Func, 0, "func(pid int, regsout *PtraceRegs) (err error)"},
+ {"PtracePeekData", Func, 0, "func(pid int, addr uintptr, out []byte) (count int, err error)"},
+ {"PtracePeekText", Func, 0, "func(pid int, addr uintptr, out []byte) (count int, err error)"},
+ {"PtracePokeData", Func, 0, "func(pid int, addr uintptr, data []byte) (count int, err error)"},
+ {"PtracePokeText", Func, 0, "func(pid int, addr uintptr, data []byte) (count int, err error)"},
+ {"PtraceRegs", Type, 0, ""},
+ {"PtraceRegs.Cs", Field, 0, ""},
+ {"PtraceRegs.Ds", Field, 0, ""},
+ {"PtraceRegs.Eax", Field, 0, ""},
+ {"PtraceRegs.Ebp", Field, 0, ""},
+ {"PtraceRegs.Ebx", Field, 0, ""},
+ {"PtraceRegs.Ecx", Field, 0, ""},
+ {"PtraceRegs.Edi", Field, 0, ""},
+ {"PtraceRegs.Edx", Field, 0, ""},
+ {"PtraceRegs.Eflags", Field, 0, ""},
+ {"PtraceRegs.Eip", Field, 0, ""},
+ {"PtraceRegs.Es", Field, 0, ""},
+ {"PtraceRegs.Esi", Field, 0, ""},
+ {"PtraceRegs.Esp", Field, 0, ""},
+ {"PtraceRegs.Fs", Field, 0, ""},
+ {"PtraceRegs.Fs_base", Field, 0, ""},
+ {"PtraceRegs.Gs", Field, 0, ""},
+ {"PtraceRegs.Gs_base", Field, 0, ""},
+ {"PtraceRegs.Orig_eax", Field, 0, ""},
+ {"PtraceRegs.Orig_rax", Field, 0, ""},
+ {"PtraceRegs.R10", Field, 0, ""},
+ {"PtraceRegs.R11", Field, 0, ""},
+ {"PtraceRegs.R12", Field, 0, ""},
+ {"PtraceRegs.R13", Field, 0, ""},
+ {"PtraceRegs.R14", Field, 0, ""},
+ {"PtraceRegs.R15", Field, 0, ""},
+ {"PtraceRegs.R8", Field, 0, ""},
+ {"PtraceRegs.R9", Field, 0, ""},
+ {"PtraceRegs.Rax", Field, 0, ""},
+ {"PtraceRegs.Rbp", Field, 0, ""},
+ {"PtraceRegs.Rbx", Field, 0, ""},
+ {"PtraceRegs.Rcx", Field, 0, ""},
+ {"PtraceRegs.Rdi", Field, 0, ""},
+ {"PtraceRegs.Rdx", Field, 0, ""},
+ {"PtraceRegs.Rip", Field, 0, ""},
+ {"PtraceRegs.Rsi", Field, 0, ""},
+ {"PtraceRegs.Rsp", Field, 0, ""},
+ {"PtraceRegs.Ss", Field, 0, ""},
+ {"PtraceRegs.Uregs", Field, 0, ""},
+ {"PtraceRegs.Xcs", Field, 0, ""},
+ {"PtraceRegs.Xds", Field, 0, ""},
+ {"PtraceRegs.Xes", Field, 0, ""},
+ {"PtraceRegs.Xfs", Field, 0, ""},
+ {"PtraceRegs.Xgs", Field, 0, ""},
+ {"PtraceRegs.Xss", Field, 0, ""},
+ {"PtraceSetOptions", Func, 0, "func(pid int, options int) (err error)"},
+ {"PtraceSetRegs", Func, 0, "func(pid int, regs *PtraceRegs) (err error)"},
+ {"PtraceSingleStep", Func, 0, "func(pid int) (err error)"},
+ {"PtraceSyscall", Func, 1, "func(pid int, signal int) (err error)"},
+ {"Pwrite", Func, 0, "func(fd int, p []byte, offset int64) (n int, err error)"},
+ {"REG_BINARY", Const, 0, ""},
+ {"REG_DWORD", Const, 0, ""},
+ {"REG_DWORD_BIG_ENDIAN", Const, 0, ""},
+ {"REG_DWORD_LITTLE_ENDIAN", Const, 0, ""},
+ {"REG_EXPAND_SZ", Const, 0, ""},
+ {"REG_FULL_RESOURCE_DESCRIPTOR", Const, 0, ""},
+ {"REG_LINK", Const, 0, ""},
+ {"REG_MULTI_SZ", Const, 0, ""},
+ {"REG_NONE", Const, 0, ""},
+ {"REG_QWORD", Const, 0, ""},
+ {"REG_QWORD_LITTLE_ENDIAN", Const, 0, ""},
+ {"REG_RESOURCE_LIST", Const, 0, ""},
+ {"REG_RESOURCE_REQUIREMENTS_LIST", Const, 0, ""},
+ {"REG_SZ", Const, 0, ""},
+ {"RLIMIT_AS", Const, 0, ""},
+ {"RLIMIT_CORE", Const, 0, ""},
+ {"RLIMIT_CPU", Const, 0, ""},
+ {"RLIMIT_CPU_USAGE_MONITOR", Const, 16, ""},
+ {"RLIMIT_DATA", Const, 0, ""},
+ {"RLIMIT_FSIZE", Const, 0, ""},
+ {"RLIMIT_NOFILE", Const, 0, ""},
+ {"RLIMIT_STACK", Const, 0, ""},
+ {"RLIM_INFINITY", Const, 0, ""},
+ {"RTAX_ADVMSS", Const, 0, ""},
+ {"RTAX_AUTHOR", Const, 0, ""},
+ {"RTAX_BRD", Const, 0, ""},
+ {"RTAX_CWND", Const, 0, ""},
+ {"RTAX_DST", Const, 0, ""},
+ {"RTAX_FEATURES", Const, 0, ""},
+ {"RTAX_FEATURE_ALLFRAG", Const, 0, ""},
+ {"RTAX_FEATURE_ECN", Const, 0, ""},
+ {"RTAX_FEATURE_SACK", Const, 0, ""},
+ {"RTAX_FEATURE_TIMESTAMP", Const, 0, ""},
+ {"RTAX_GATEWAY", Const, 0, ""},
+ {"RTAX_GENMASK", Const, 0, ""},
+ {"RTAX_HOPLIMIT", Const, 0, ""},
+ {"RTAX_IFA", Const, 0, ""},
+ {"RTAX_IFP", Const, 0, ""},
+ {"RTAX_INITCWND", Const, 0, ""},
+ {"RTAX_INITRWND", Const, 0, ""},
+ {"RTAX_LABEL", Const, 1, ""},
+ {"RTAX_LOCK", Const, 0, ""},
+ {"RTAX_MAX", Const, 0, ""},
+ {"RTAX_MTU", Const, 0, ""},
+ {"RTAX_NETMASK", Const, 0, ""},
+ {"RTAX_REORDERING", Const, 0, ""},
+ {"RTAX_RTO_MIN", Const, 0, ""},
+ {"RTAX_RTT", Const, 0, ""},
+ {"RTAX_RTTVAR", Const, 0, ""},
+ {"RTAX_SRC", Const, 1, ""},
+ {"RTAX_SRCMASK", Const, 1, ""},
+ {"RTAX_SSTHRESH", Const, 0, ""},
+ {"RTAX_TAG", Const, 1, ""},
+ {"RTAX_UNSPEC", Const, 0, ""},
+ {"RTAX_WINDOW", Const, 0, ""},
+ {"RTA_ALIGNTO", Const, 0, ""},
+ {"RTA_AUTHOR", Const, 0, ""},
+ {"RTA_BRD", Const, 0, ""},
+ {"RTA_CACHEINFO", Const, 0, ""},
+ {"RTA_DST", Const, 0, ""},
+ {"RTA_FLOW", Const, 0, ""},
+ {"RTA_GATEWAY", Const, 0, ""},
+ {"RTA_GENMASK", Const, 0, ""},
+ {"RTA_IFA", Const, 0, ""},
+ {"RTA_IFP", Const, 0, ""},
+ {"RTA_IIF", Const, 0, ""},
+ {"RTA_LABEL", Const, 1, ""},
+ {"RTA_MAX", Const, 0, ""},
+ {"RTA_METRICS", Const, 0, ""},
+ {"RTA_MULTIPATH", Const, 0, ""},
+ {"RTA_NETMASK", Const, 0, ""},
+ {"RTA_OIF", Const, 0, ""},
+ {"RTA_PREFSRC", Const, 0, ""},
+ {"RTA_PRIORITY", Const, 0, ""},
+ {"RTA_SRC", Const, 0, ""},
+ {"RTA_SRCMASK", Const, 1, ""},
+ {"RTA_TABLE", Const, 0, ""},
+ {"RTA_TAG", Const, 1, ""},
+ {"RTA_UNSPEC", Const, 0, ""},
+ {"RTCF_DIRECTSRC", Const, 0, ""},
+ {"RTCF_DOREDIRECT", Const, 0, ""},
+ {"RTCF_LOG", Const, 0, ""},
+ {"RTCF_MASQ", Const, 0, ""},
+ {"RTCF_NAT", Const, 0, ""},
+ {"RTCF_VALVE", Const, 0, ""},
+ {"RTF_ADDRCLASSMASK", Const, 0, ""},
+ {"RTF_ADDRCONF", Const, 0, ""},
+ {"RTF_ALLONLINK", Const, 0, ""},
+ {"RTF_ANNOUNCE", Const, 1, ""},
+ {"RTF_BLACKHOLE", Const, 0, ""},
+ {"RTF_BROADCAST", Const, 0, ""},
+ {"RTF_CACHE", Const, 0, ""},
+ {"RTF_CLONED", Const, 1, ""},
+ {"RTF_CLONING", Const, 0, ""},
+ {"RTF_CONDEMNED", Const, 0, ""},
+ {"RTF_DEFAULT", Const, 0, ""},
+ {"RTF_DELCLONE", Const, 0, ""},
+ {"RTF_DONE", Const, 0, ""},
+ {"RTF_DYNAMIC", Const, 0, ""},
+ {"RTF_FLOW", Const, 0, ""},
+ {"RTF_FMASK", Const, 0, ""},
+ {"RTF_GATEWAY", Const, 0, ""},
+ {"RTF_GWFLAG_COMPAT", Const, 3, ""},
+ {"RTF_HOST", Const, 0, ""},
+ {"RTF_IFREF", Const, 0, ""},
+ {"RTF_IFSCOPE", Const, 0, ""},
+ {"RTF_INTERFACE", Const, 0, ""},
+ {"RTF_IRTT", Const, 0, ""},
+ {"RTF_LINKRT", Const, 0, ""},
+ {"RTF_LLDATA", Const, 0, ""},
+ {"RTF_LLINFO", Const, 0, ""},
+ {"RTF_LOCAL", Const, 0, ""},
+ {"RTF_MASK", Const, 1, ""},
+ {"RTF_MODIFIED", Const, 0, ""},
+ {"RTF_MPATH", Const, 1, ""},
+ {"RTF_MPLS", Const, 1, ""},
+ {"RTF_MSS", Const, 0, ""},
+ {"RTF_MTU", Const, 0, ""},
+ {"RTF_MULTICAST", Const, 0, ""},
+ {"RTF_NAT", Const, 0, ""},
+ {"RTF_NOFORWARD", Const, 0, ""},
+ {"RTF_NONEXTHOP", Const, 0, ""},
+ {"RTF_NOPMTUDISC", Const, 0, ""},
+ {"RTF_PERMANENT_ARP", Const, 1, ""},
+ {"RTF_PINNED", Const, 0, ""},
+ {"RTF_POLICY", Const, 0, ""},
+ {"RTF_PRCLONING", Const, 0, ""},
+ {"RTF_PROTO1", Const, 0, ""},
+ {"RTF_PROTO2", Const, 0, ""},
+ {"RTF_PROTO3", Const, 0, ""},
+ {"RTF_PROXY", Const, 16, ""},
+ {"RTF_REINSTATE", Const, 0, ""},
+ {"RTF_REJECT", Const, 0, ""},
+ {"RTF_RNH_LOCKED", Const, 0, ""},
+ {"RTF_ROUTER", Const, 16, ""},
+ {"RTF_SOURCE", Const, 1, ""},
+ {"RTF_SRC", Const, 1, ""},
+ {"RTF_STATIC", Const, 0, ""},
+ {"RTF_STICKY", Const, 0, ""},
+ {"RTF_THROW", Const, 0, ""},
+ {"RTF_TUNNEL", Const, 1, ""},
+ {"RTF_UP", Const, 0, ""},
+ {"RTF_USETRAILERS", Const, 1, ""},
+ {"RTF_WASCLONED", Const, 0, ""},
+ {"RTF_WINDOW", Const, 0, ""},
+ {"RTF_XRESOLVE", Const, 0, ""},
+ {"RTM_ADD", Const, 0, ""},
+ {"RTM_BASE", Const, 0, ""},
+ {"RTM_CHANGE", Const, 0, ""},
+ {"RTM_CHGADDR", Const, 1, ""},
+ {"RTM_DELACTION", Const, 0, ""},
+ {"RTM_DELADDR", Const, 0, ""},
+ {"RTM_DELADDRLABEL", Const, 0, ""},
+ {"RTM_DELETE", Const, 0, ""},
+ {"RTM_DELLINK", Const, 0, ""},
+ {"RTM_DELMADDR", Const, 0, ""},
+ {"RTM_DELNEIGH", Const, 0, ""},
+ {"RTM_DELQDISC", Const, 0, ""},
+ {"RTM_DELROUTE", Const, 0, ""},
+ {"RTM_DELRULE", Const, 0, ""},
+ {"RTM_DELTCLASS", Const, 0, ""},
+ {"RTM_DELTFILTER", Const, 0, ""},
+ {"RTM_DESYNC", Const, 1, ""},
+ {"RTM_F_CLONED", Const, 0, ""},
+ {"RTM_F_EQUALIZE", Const, 0, ""},
+ {"RTM_F_NOTIFY", Const, 0, ""},
+ {"RTM_F_PREFIX", Const, 0, ""},
+ {"RTM_GET", Const, 0, ""},
+ {"RTM_GET2", Const, 0, ""},
+ {"RTM_GETACTION", Const, 0, ""},
+ {"RTM_GETADDR", Const, 0, ""},
+ {"RTM_GETADDRLABEL", Const, 0, ""},
+ {"RTM_GETANYCAST", Const, 0, ""},
+ {"RTM_GETDCB", Const, 0, ""},
+ {"RTM_GETLINK", Const, 0, ""},
+ {"RTM_GETMULTICAST", Const, 0, ""},
+ {"RTM_GETNEIGH", Const, 0, ""},
+ {"RTM_GETNEIGHTBL", Const, 0, ""},
+ {"RTM_GETQDISC", Const, 0, ""},
+ {"RTM_GETROUTE", Const, 0, ""},
+ {"RTM_GETRULE", Const, 0, ""},
+ {"RTM_GETTCLASS", Const, 0, ""},
+ {"RTM_GETTFILTER", Const, 0, ""},
+ {"RTM_IEEE80211", Const, 0, ""},
+ {"RTM_IFANNOUNCE", Const, 0, ""},
+ {"RTM_IFINFO", Const, 0, ""},
+ {"RTM_IFINFO2", Const, 0, ""},
+ {"RTM_LLINFO_UPD", Const, 1, ""},
+ {"RTM_LOCK", Const, 0, ""},
+ {"RTM_LOSING", Const, 0, ""},
+ {"RTM_MAX", Const, 0, ""},
+ {"RTM_MAXSIZE", Const, 1, ""},
+ {"RTM_MISS", Const, 0, ""},
+ {"RTM_NEWACTION", Const, 0, ""},
+ {"RTM_NEWADDR", Const, 0, ""},
+ {"RTM_NEWADDRLABEL", Const, 0, ""},
+ {"RTM_NEWLINK", Const, 0, ""},
+ {"RTM_NEWMADDR", Const, 0, ""},
+ {"RTM_NEWMADDR2", Const, 0, ""},
+ {"RTM_NEWNDUSEROPT", Const, 0, ""},
+ {"RTM_NEWNEIGH", Const, 0, ""},
+ {"RTM_NEWNEIGHTBL", Const, 0, ""},
+ {"RTM_NEWPREFIX", Const, 0, ""},
+ {"RTM_NEWQDISC", Const, 0, ""},
+ {"RTM_NEWROUTE", Const, 0, ""},
+ {"RTM_NEWRULE", Const, 0, ""},
+ {"RTM_NEWTCLASS", Const, 0, ""},
+ {"RTM_NEWTFILTER", Const, 0, ""},
+ {"RTM_NR_FAMILIES", Const, 0, ""},
+ {"RTM_NR_MSGTYPES", Const, 0, ""},
+ {"RTM_OIFINFO", Const, 1, ""},
+ {"RTM_OLDADD", Const, 0, ""},
+ {"RTM_OLDDEL", Const, 0, ""},
+ {"RTM_OOIFINFO", Const, 1, ""},
+ {"RTM_REDIRECT", Const, 0, ""},
+ {"RTM_RESOLVE", Const, 0, ""},
+ {"RTM_RTTUNIT", Const, 0, ""},
+ {"RTM_SETDCB", Const, 0, ""},
+ {"RTM_SETGATE", Const, 1, ""},
+ {"RTM_SETLINK", Const, 0, ""},
+ {"RTM_SETNEIGHTBL", Const, 0, ""},
+ {"RTM_VERSION", Const, 0, ""},
+ {"RTNH_ALIGNTO", Const, 0, ""},
+ {"RTNH_F_DEAD", Const, 0, ""},
+ {"RTNH_F_ONLINK", Const, 0, ""},
+ {"RTNH_F_PERVASIVE", Const, 0, ""},
+ {"RTNLGRP_IPV4_IFADDR", Const, 1, ""},
+ {"RTNLGRP_IPV4_MROUTE", Const, 1, ""},
+ {"RTNLGRP_IPV4_ROUTE", Const, 1, ""},
+ {"RTNLGRP_IPV4_RULE", Const, 1, ""},
+ {"RTNLGRP_IPV6_IFADDR", Const, 1, ""},
+ {"RTNLGRP_IPV6_IFINFO", Const, 1, ""},
+ {"RTNLGRP_IPV6_MROUTE", Const, 1, ""},
+ {"RTNLGRP_IPV6_PREFIX", Const, 1, ""},
+ {"RTNLGRP_IPV6_ROUTE", Const, 1, ""},
+ {"RTNLGRP_IPV6_RULE", Const, 1, ""},
+ {"RTNLGRP_LINK", Const, 1, ""},
+ {"RTNLGRP_ND_USEROPT", Const, 1, ""},
+ {"RTNLGRP_NEIGH", Const, 1, ""},
+ {"RTNLGRP_NONE", Const, 1, ""},
+ {"RTNLGRP_NOTIFY", Const, 1, ""},
+ {"RTNLGRP_TC", Const, 1, ""},
+ {"RTN_ANYCAST", Const, 0, ""},
+ {"RTN_BLACKHOLE", Const, 0, ""},
+ {"RTN_BROADCAST", Const, 0, ""},
+ {"RTN_LOCAL", Const, 0, ""},
+ {"RTN_MAX", Const, 0, ""},
+ {"RTN_MULTICAST", Const, 0, ""},
+ {"RTN_NAT", Const, 0, ""},
+ {"RTN_PROHIBIT", Const, 0, ""},
+ {"RTN_THROW", Const, 0, ""},
+ {"RTN_UNICAST", Const, 0, ""},
+ {"RTN_UNREACHABLE", Const, 0, ""},
+ {"RTN_UNSPEC", Const, 0, ""},
+ {"RTN_XRESOLVE", Const, 0, ""},
+ {"RTPROT_BIRD", Const, 0, ""},
+ {"RTPROT_BOOT", Const, 0, ""},
+ {"RTPROT_DHCP", Const, 0, ""},
+ {"RTPROT_DNROUTED", Const, 0, ""},
+ {"RTPROT_GATED", Const, 0, ""},
+ {"RTPROT_KERNEL", Const, 0, ""},
+ {"RTPROT_MRT", Const, 0, ""},
+ {"RTPROT_NTK", Const, 0, ""},
+ {"RTPROT_RA", Const, 0, ""},
+ {"RTPROT_REDIRECT", Const, 0, ""},
+ {"RTPROT_STATIC", Const, 0, ""},
+ {"RTPROT_UNSPEC", Const, 0, ""},
+ {"RTPROT_XORP", Const, 0, ""},
+ {"RTPROT_ZEBRA", Const, 0, ""},
+ {"RTV_EXPIRE", Const, 0, ""},
+ {"RTV_HOPCOUNT", Const, 0, ""},
+ {"RTV_MTU", Const, 0, ""},
+ {"RTV_RPIPE", Const, 0, ""},
+ {"RTV_RTT", Const, 0, ""},
+ {"RTV_RTTVAR", Const, 0, ""},
+ {"RTV_SPIPE", Const, 0, ""},
+ {"RTV_SSTHRESH", Const, 0, ""},
+ {"RTV_WEIGHT", Const, 0, ""},
+ {"RT_CACHING_CONTEXT", Const, 1, ""},
+ {"RT_CLASS_DEFAULT", Const, 0, ""},
+ {"RT_CLASS_LOCAL", Const, 0, ""},
+ {"RT_CLASS_MAIN", Const, 0, ""},
+ {"RT_CLASS_MAX", Const, 0, ""},
+ {"RT_CLASS_UNSPEC", Const, 0, ""},
+ {"RT_DEFAULT_FIB", Const, 1, ""},
+ {"RT_NORTREF", Const, 1, ""},
+ {"RT_SCOPE_HOST", Const, 0, ""},
+ {"RT_SCOPE_LINK", Const, 0, ""},
+ {"RT_SCOPE_NOWHERE", Const, 0, ""},
+ {"RT_SCOPE_SITE", Const, 0, ""},
+ {"RT_SCOPE_UNIVERSE", Const, 0, ""},
+ {"RT_TABLEID_MAX", Const, 1, ""},
+ {"RT_TABLE_COMPAT", Const, 0, ""},
+ {"RT_TABLE_DEFAULT", Const, 0, ""},
+ {"RT_TABLE_LOCAL", Const, 0, ""},
+ {"RT_TABLE_MAIN", Const, 0, ""},
+ {"RT_TABLE_MAX", Const, 0, ""},
+ {"RT_TABLE_UNSPEC", Const, 0, ""},
+ {"RUSAGE_CHILDREN", Const, 0, ""},
+ {"RUSAGE_SELF", Const, 0, ""},
+ {"RUSAGE_THREAD", Const, 0, ""},
+ {"Radvisory_t", Type, 0, ""},
+ {"Radvisory_t.Count", Field, 0, ""},
+ {"Radvisory_t.Offset", Field, 0, ""},
+ {"Radvisory_t.Pad_cgo_0", Field, 0, ""},
+ {"RawConn", Type, 9, ""},
+ {"RawSockaddr", Type, 0, ""},
+ {"RawSockaddr.Data", Field, 0, ""},
+ {"RawSockaddr.Family", Field, 0, ""},
+ {"RawSockaddr.Len", Field, 0, ""},
+ {"RawSockaddrAny", Type, 0, ""},
+ {"RawSockaddrAny.Addr", Field, 0, ""},
+ {"RawSockaddrAny.Pad", Field, 0, ""},
+ {"RawSockaddrDatalink", Type, 0, ""},
+ {"RawSockaddrDatalink.Alen", Field, 0, ""},
+ {"RawSockaddrDatalink.Data", Field, 0, ""},
+ {"RawSockaddrDatalink.Family", Field, 0, ""},
+ {"RawSockaddrDatalink.Index", Field, 0, ""},
+ {"RawSockaddrDatalink.Len", Field, 0, ""},
+ {"RawSockaddrDatalink.Nlen", Field, 0, ""},
+ {"RawSockaddrDatalink.Pad_cgo_0", Field, 2, ""},
+ {"RawSockaddrDatalink.Slen", Field, 0, ""},
+ {"RawSockaddrDatalink.Type", Field, 0, ""},
+ {"RawSockaddrInet4", Type, 0, ""},
+ {"RawSockaddrInet4.Addr", Field, 0, ""},
+ {"RawSockaddrInet4.Family", Field, 0, ""},
+ {"RawSockaddrInet4.Len", Field, 0, ""},
+ {"RawSockaddrInet4.Port", Field, 0, ""},
+ {"RawSockaddrInet4.Zero", Field, 0, ""},
+ {"RawSockaddrInet6", Type, 0, ""},
+ {"RawSockaddrInet6.Addr", Field, 0, ""},
+ {"RawSockaddrInet6.Family", Field, 0, ""},
+ {"RawSockaddrInet6.Flowinfo", Field, 0, ""},
+ {"RawSockaddrInet6.Len", Field, 0, ""},
+ {"RawSockaddrInet6.Port", Field, 0, ""},
+ {"RawSockaddrInet6.Scope_id", Field, 0, ""},
+ {"RawSockaddrLinklayer", Type, 0, ""},
+ {"RawSockaddrLinklayer.Addr", Field, 0, ""},
+ {"RawSockaddrLinklayer.Family", Field, 0, ""},
+ {"RawSockaddrLinklayer.Halen", Field, 0, ""},
+ {"RawSockaddrLinklayer.Hatype", Field, 0, ""},
+ {"RawSockaddrLinklayer.Ifindex", Field, 0, ""},
+ {"RawSockaddrLinklayer.Pkttype", Field, 0, ""},
+ {"RawSockaddrLinklayer.Protocol", Field, 0, ""},
+ {"RawSockaddrNetlink", Type, 0, ""},
+ {"RawSockaddrNetlink.Family", Field, 0, ""},
+ {"RawSockaddrNetlink.Groups", Field, 0, ""},
+ {"RawSockaddrNetlink.Pad", Field, 0, ""},
+ {"RawSockaddrNetlink.Pid", Field, 0, ""},
+ {"RawSockaddrUnix", Type, 0, ""},
+ {"RawSockaddrUnix.Family", Field, 0, ""},
+ {"RawSockaddrUnix.Len", Field, 0, ""},
+ {"RawSockaddrUnix.Pad_cgo_0", Field, 2, ""},
+ {"RawSockaddrUnix.Path", Field, 0, ""},
+ {"RawSyscall", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+ {"RawSyscall6", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+ {"Read", Func, 0, "func(fd int, p []byte) (n int, err error)"},
+ {"ReadConsole", Func, 1, ""},
+ {"ReadDirectoryChanges", Func, 0, ""},
+ {"ReadDirent", Func, 0, "func(fd int, buf []byte) (n int, err error)"},
+ {"ReadFile", Func, 0, ""},
+ {"Readlink", Func, 0, "func(path string, buf []byte) (n int, err error)"},
+ {"Reboot", Func, 0, "func(cmd int) (err error)"},
+ {"Recvfrom", Func, 0, "func(fd int, p []byte, flags int) (n int, from Sockaddr, err error)"},
+ {"Recvmsg", Func, 0, "func(fd int, p []byte, oob []byte, flags int) (n int, oobn int, recvflags int, from Sockaddr, err error)"},
+ {"RegCloseKey", Func, 0, ""},
+ {"RegEnumKeyEx", Func, 0, ""},
+ {"RegOpenKeyEx", Func, 0, ""},
+ {"RegQueryInfoKey", Func, 0, ""},
+ {"RegQueryValueEx", Func, 0, ""},
+ {"RemoveDirectory", Func, 0, ""},
+ {"Removexattr", Func, 1, "func(path string, attr string) (err error)"},
+ {"Rename", Func, 0, "func(oldpath string, newpath string) (err error)"},
+ {"Renameat", Func, 0, "func(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)"},
+ {"Revoke", Func, 0, ""},
+ {"Rlimit", Type, 0, ""},
+ {"Rlimit.Cur", Field, 0, ""},
+ {"Rlimit.Max", Field, 0, ""},
+ {"Rmdir", Func, 0, "func(path string) error"},
+ {"RouteMessage", Type, 0, ""},
+ {"RouteMessage.Data", Field, 0, ""},
+ {"RouteMessage.Header", Field, 0, ""},
+ {"RouteRIB", Func, 0, ""},
+ {"RoutingMessage", Type, 0, ""},
+ {"RtAttr", Type, 0, ""},
+ {"RtAttr.Len", Field, 0, ""},
+ {"RtAttr.Type", Field, 0, ""},
+ {"RtGenmsg", Type, 0, ""},
+ {"RtGenmsg.Family", Field, 0, ""},
+ {"RtMetrics", Type, 0, ""},
+ {"RtMetrics.Expire", Field, 0, ""},
+ {"RtMetrics.Filler", Field, 0, ""},
+ {"RtMetrics.Hopcount", Field, 0, ""},
+ {"RtMetrics.Locks", Field, 0, ""},
+ {"RtMetrics.Mtu", Field, 0, ""},
+ {"RtMetrics.Pad", Field, 3, ""},
+ {"RtMetrics.Pksent", Field, 0, ""},
+ {"RtMetrics.Recvpipe", Field, 0, ""},
+ {"RtMetrics.Refcnt", Field, 2, ""},
+ {"RtMetrics.Rtt", Field, 0, ""},
+ {"RtMetrics.Rttvar", Field, 0, ""},
+ {"RtMetrics.Sendpipe", Field, 0, ""},
+ {"RtMetrics.Ssthresh", Field, 0, ""},
+ {"RtMetrics.Weight", Field, 0, ""},
+ {"RtMsg", Type, 0, ""},
+ {"RtMsg.Dst_len", Field, 0, ""},
+ {"RtMsg.Family", Field, 0, ""},
+ {"RtMsg.Flags", Field, 0, ""},
+ {"RtMsg.Protocol", Field, 0, ""},
+ {"RtMsg.Scope", Field, 0, ""},
+ {"RtMsg.Src_len", Field, 0, ""},
+ {"RtMsg.Table", Field, 0, ""},
+ {"RtMsg.Tos", Field, 0, ""},
+ {"RtMsg.Type", Field, 0, ""},
+ {"RtMsghdr", Type, 0, ""},
+ {"RtMsghdr.Addrs", Field, 0, ""},
+ {"RtMsghdr.Errno", Field, 0, ""},
+ {"RtMsghdr.Flags", Field, 0, ""},
+ {"RtMsghdr.Fmask", Field, 0, ""},
+ {"RtMsghdr.Hdrlen", Field, 2, ""},
+ {"RtMsghdr.Index", Field, 0, ""},
+ {"RtMsghdr.Inits", Field, 0, ""},
+ {"RtMsghdr.Mpls", Field, 2, ""},
+ {"RtMsghdr.Msglen", Field, 0, ""},
+ {"RtMsghdr.Pad_cgo_0", Field, 0, ""},
+ {"RtMsghdr.Pad_cgo_1", Field, 2, ""},
+ {"RtMsghdr.Pid", Field, 0, ""},
+ {"RtMsghdr.Priority", Field, 2, ""},
+ {"RtMsghdr.Rmx", Field, 0, ""},
+ {"RtMsghdr.Seq", Field, 0, ""},
+ {"RtMsghdr.Tableid", Field, 2, ""},
+ {"RtMsghdr.Type", Field, 0, ""},
+ {"RtMsghdr.Use", Field, 0, ""},
+ {"RtMsghdr.Version", Field, 0, ""},
+ {"RtNexthop", Type, 0, ""},
+ {"RtNexthop.Flags", Field, 0, ""},
+ {"RtNexthop.Hops", Field, 0, ""},
+ {"RtNexthop.Ifindex", Field, 0, ""},
+ {"RtNexthop.Len", Field, 0, ""},
+ {"Rusage", Type, 0, ""},
+ {"Rusage.CreationTime", Field, 0, ""},
+ {"Rusage.ExitTime", Field, 0, ""},
+ {"Rusage.Idrss", Field, 0, ""},
+ {"Rusage.Inblock", Field, 0, ""},
+ {"Rusage.Isrss", Field, 0, ""},
+ {"Rusage.Ixrss", Field, 0, ""},
+ {"Rusage.KernelTime", Field, 0, ""},
+ {"Rusage.Majflt", Field, 0, ""},
+ {"Rusage.Maxrss", Field, 0, ""},
+ {"Rusage.Minflt", Field, 0, ""},
+ {"Rusage.Msgrcv", Field, 0, ""},
+ {"Rusage.Msgsnd", Field, 0, ""},
+ {"Rusage.Nivcsw", Field, 0, ""},
+ {"Rusage.Nsignals", Field, 0, ""},
+ {"Rusage.Nswap", Field, 0, ""},
+ {"Rusage.Nvcsw", Field, 0, ""},
+ {"Rusage.Oublock", Field, 0, ""},
+ {"Rusage.Stime", Field, 0, ""},
+ {"Rusage.UserTime", Field, 0, ""},
+ {"Rusage.Utime", Field, 0, ""},
+ {"SCM_BINTIME", Const, 0, ""},
+ {"SCM_CREDENTIALS", Const, 0, ""},
+ {"SCM_CREDS", Const, 0, ""},
+ {"SCM_RIGHTS", Const, 0, ""},
+ {"SCM_TIMESTAMP", Const, 0, ""},
+ {"SCM_TIMESTAMPING", Const, 0, ""},
+ {"SCM_TIMESTAMPNS", Const, 0, ""},
+ {"SCM_TIMESTAMP_MONOTONIC", Const, 0, ""},
+ {"SHUT_RD", Const, 0, ""},
+ {"SHUT_RDWR", Const, 0, ""},
+ {"SHUT_WR", Const, 0, ""},
+ {"SID", Type, 0, ""},
+ {"SIDAndAttributes", Type, 0, ""},
+ {"SIDAndAttributes.Attributes", Field, 0, ""},
+ {"SIDAndAttributes.Sid", Field, 0, ""},
+ {"SIGABRT", Const, 0, ""},
+ {"SIGALRM", Const, 0, ""},
+ {"SIGBUS", Const, 0, ""},
+ {"SIGCHLD", Const, 0, ""},
+ {"SIGCLD", Const, 0, ""},
+ {"SIGCONT", Const, 0, ""},
+ {"SIGEMT", Const, 0, ""},
+ {"SIGFPE", Const, 0, ""},
+ {"SIGHUP", Const, 0, ""},
+ {"SIGILL", Const, 0, ""},
+ {"SIGINFO", Const, 0, ""},
+ {"SIGINT", Const, 0, ""},
+ {"SIGIO", Const, 0, ""},
+ {"SIGIOT", Const, 0, ""},
+ {"SIGKILL", Const, 0, ""},
+ {"SIGLIBRT", Const, 1, ""},
+ {"SIGLWP", Const, 0, ""},
+ {"SIGPIPE", Const, 0, ""},
+ {"SIGPOLL", Const, 0, ""},
+ {"SIGPROF", Const, 0, ""},
+ {"SIGPWR", Const, 0, ""},
+ {"SIGQUIT", Const, 0, ""},
+ {"SIGSEGV", Const, 0, ""},
+ {"SIGSTKFLT", Const, 0, ""},
+ {"SIGSTOP", Const, 0, ""},
+ {"SIGSYS", Const, 0, ""},
+ {"SIGTERM", Const, 0, ""},
+ {"SIGTHR", Const, 0, ""},
+ {"SIGTRAP", Const, 0, ""},
+ {"SIGTSTP", Const, 0, ""},
+ {"SIGTTIN", Const, 0, ""},
+ {"SIGTTOU", Const, 0, ""},
+ {"SIGUNUSED", Const, 0, ""},
+ {"SIGURG", Const, 0, ""},
+ {"SIGUSR1", Const, 0, ""},
+ {"SIGUSR2", Const, 0, ""},
+ {"SIGVTALRM", Const, 0, ""},
+ {"SIGWINCH", Const, 0, ""},
+ {"SIGXCPU", Const, 0, ""},
+ {"SIGXFSZ", Const, 0, ""},
+ {"SIOCADDDLCI", Const, 0, ""},
+ {"SIOCADDMULTI", Const, 0, ""},
+ {"SIOCADDRT", Const, 0, ""},
+ {"SIOCAIFADDR", Const, 0, ""},
+ {"SIOCAIFGROUP", Const, 0, ""},
+ {"SIOCALIFADDR", Const, 0, ""},
+ {"SIOCARPIPLL", Const, 0, ""},
+ {"SIOCATMARK", Const, 0, ""},
+ {"SIOCAUTOADDR", Const, 0, ""},
+ {"SIOCAUTONETMASK", Const, 0, ""},
+ {"SIOCBRDGADD", Const, 1, ""},
+ {"SIOCBRDGADDS", Const, 1, ""},
+ {"SIOCBRDGARL", Const, 1, ""},
+ {"SIOCBRDGDADDR", Const, 1, ""},
+ {"SIOCBRDGDEL", Const, 1, ""},
+ {"SIOCBRDGDELS", Const, 1, ""},
+ {"SIOCBRDGFLUSH", Const, 1, ""},
+ {"SIOCBRDGFRL", Const, 1, ""},
+ {"SIOCBRDGGCACHE", Const, 1, ""},
+ {"SIOCBRDGGFD", Const, 1, ""},
+ {"SIOCBRDGGHT", Const, 1, ""},
+ {"SIOCBRDGGIFFLGS", Const, 1, ""},
+ {"SIOCBRDGGMA", Const, 1, ""},
+ {"SIOCBRDGGPARAM", Const, 1, ""},
+ {"SIOCBRDGGPRI", Const, 1, ""},
+ {"SIOCBRDGGRL", Const, 1, ""},
+ {"SIOCBRDGGSIFS", Const, 1, ""},
+ {"SIOCBRDGGTO", Const, 1, ""},
+ {"SIOCBRDGIFS", Const, 1, ""},
+ {"SIOCBRDGRTS", Const, 1, ""},
+ {"SIOCBRDGSADDR", Const, 1, ""},
+ {"SIOCBRDGSCACHE", Const, 1, ""},
+ {"SIOCBRDGSFD", Const, 1, ""},
+ {"SIOCBRDGSHT", Const, 1, ""},
+ {"SIOCBRDGSIFCOST", Const, 1, ""},
+ {"SIOCBRDGSIFFLGS", Const, 1, ""},
+ {"SIOCBRDGSIFPRIO", Const, 1, ""},
+ {"SIOCBRDGSMA", Const, 1, ""},
+ {"SIOCBRDGSPRI", Const, 1, ""},
+ {"SIOCBRDGSPROTO", Const, 1, ""},
+ {"SIOCBRDGSTO", Const, 1, ""},
+ {"SIOCBRDGSTXHC", Const, 1, ""},
+ {"SIOCDARP", Const, 0, ""},
+ {"SIOCDELDLCI", Const, 0, ""},
+ {"SIOCDELMULTI", Const, 0, ""},
+ {"SIOCDELRT", Const, 0, ""},
+ {"SIOCDEVPRIVATE", Const, 0, ""},
+ {"SIOCDIFADDR", Const, 0, ""},
+ {"SIOCDIFGROUP", Const, 0, ""},
+ {"SIOCDIFPHYADDR", Const, 0, ""},
+ {"SIOCDLIFADDR", Const, 0, ""},
+ {"SIOCDRARP", Const, 0, ""},
+ {"SIOCGARP", Const, 0, ""},
+ {"SIOCGDRVSPEC", Const, 0, ""},
+ {"SIOCGETKALIVE", Const, 1, ""},
+ {"SIOCGETLABEL", Const, 1, ""},
+ {"SIOCGETPFLOW", Const, 1, ""},
+ {"SIOCGETPFSYNC", Const, 1, ""},
+ {"SIOCGETSGCNT", Const, 0, ""},
+ {"SIOCGETVIFCNT", Const, 0, ""},
+ {"SIOCGETVLAN", Const, 0, ""},
+ {"SIOCGHIWAT", Const, 0, ""},
+ {"SIOCGIFADDR", Const, 0, ""},
+ {"SIOCGIFADDRPREF", Const, 1, ""},
+ {"SIOCGIFALIAS", Const, 1, ""},
+ {"SIOCGIFALTMTU", Const, 0, ""},
+ {"SIOCGIFASYNCMAP", Const, 0, ""},
+ {"SIOCGIFBOND", Const, 0, ""},
+ {"SIOCGIFBR", Const, 0, ""},
+ {"SIOCGIFBRDADDR", Const, 0, ""},
+ {"SIOCGIFCAP", Const, 0, ""},
+ {"SIOCGIFCONF", Const, 0, ""},
+ {"SIOCGIFCOUNT", Const, 0, ""},
+ {"SIOCGIFDATA", Const, 1, ""},
+ {"SIOCGIFDESCR", Const, 0, ""},
+ {"SIOCGIFDEVMTU", Const, 0, ""},
+ {"SIOCGIFDLT", Const, 1, ""},
+ {"SIOCGIFDSTADDR", Const, 0, ""},
+ {"SIOCGIFENCAP", Const, 0, ""},
+ {"SIOCGIFFIB", Const, 1, ""},
+ {"SIOCGIFFLAGS", Const, 0, ""},
+ {"SIOCGIFGATTR", Const, 1, ""},
+ {"SIOCGIFGENERIC", Const, 0, ""},
+ {"SIOCGIFGMEMB", Const, 0, ""},
+ {"SIOCGIFGROUP", Const, 0, ""},
+ {"SIOCGIFHARDMTU", Const, 3, ""},
+ {"SIOCGIFHWADDR", Const, 0, ""},
+ {"SIOCGIFINDEX", Const, 0, ""},
+ {"SIOCGIFKPI", Const, 0, ""},
+ {"SIOCGIFMAC", Const, 0, ""},
+ {"SIOCGIFMAP", Const, 0, ""},
+ {"SIOCGIFMEDIA", Const, 0, ""},
+ {"SIOCGIFMEM", Const, 0, ""},
+ {"SIOCGIFMETRIC", Const, 0, ""},
+ {"SIOCGIFMTU", Const, 0, ""},
+ {"SIOCGIFNAME", Const, 0, ""},
+ {"SIOCGIFNETMASK", Const, 0, ""},
+ {"SIOCGIFPDSTADDR", Const, 0, ""},
+ {"SIOCGIFPFLAGS", Const, 0, ""},
+ {"SIOCGIFPHYS", Const, 0, ""},
+ {"SIOCGIFPRIORITY", Const, 1, ""},
+ {"SIOCGIFPSRCADDR", Const, 0, ""},
+ {"SIOCGIFRDOMAIN", Const, 1, ""},
+ {"SIOCGIFRTLABEL", Const, 1, ""},
+ {"SIOCGIFSLAVE", Const, 0, ""},
+ {"SIOCGIFSTATUS", Const, 0, ""},
+ {"SIOCGIFTIMESLOT", Const, 1, ""},
+ {"SIOCGIFTXQLEN", Const, 0, ""},
+ {"SIOCGIFVLAN", Const, 0, ""},
+ {"SIOCGIFWAKEFLAGS", Const, 0, ""},
+ {"SIOCGIFXFLAGS", Const, 1, ""},
+ {"SIOCGLIFADDR", Const, 0, ""},
+ {"SIOCGLIFPHYADDR", Const, 0, ""},
+ {"SIOCGLIFPHYRTABLE", Const, 1, ""},
+ {"SIOCGLIFPHYTTL", Const, 3, ""},
+ {"SIOCGLINKSTR", Const, 1, ""},
+ {"SIOCGLOWAT", Const, 0, ""},
+ {"SIOCGPGRP", Const, 0, ""},
+ {"SIOCGPRIVATE_0", Const, 0, ""},
+ {"SIOCGPRIVATE_1", Const, 0, ""},
+ {"SIOCGRARP", Const, 0, ""},
+ {"SIOCGSPPPPARAMS", Const, 3, ""},
+ {"SIOCGSTAMP", Const, 0, ""},
+ {"SIOCGSTAMPNS", Const, 0, ""},
+ {"SIOCGVH", Const, 1, ""},
+ {"SIOCGVNETID", Const, 3, ""},
+ {"SIOCIFCREATE", Const, 0, ""},
+ {"SIOCIFCREATE2", Const, 0, ""},
+ {"SIOCIFDESTROY", Const, 0, ""},
+ {"SIOCIFGCLONERS", Const, 0, ""},
+ {"SIOCINITIFADDR", Const, 1, ""},
+ {"SIOCPROTOPRIVATE", Const, 0, ""},
+ {"SIOCRSLVMULTI", Const, 0, ""},
+ {"SIOCRTMSG", Const, 0, ""},
+ {"SIOCSARP", Const, 0, ""},
+ {"SIOCSDRVSPEC", Const, 0, ""},
+ {"SIOCSETKALIVE", Const, 1, ""},
+ {"SIOCSETLABEL", Const, 1, ""},
+ {"SIOCSETPFLOW", Const, 1, ""},
+ {"SIOCSETPFSYNC", Const, 1, ""},
+ {"SIOCSETVLAN", Const, 0, ""},
+ {"SIOCSHIWAT", Const, 0, ""},
+ {"SIOCSIFADDR", Const, 0, ""},
+ {"SIOCSIFADDRPREF", Const, 1, ""},
+ {"SIOCSIFALTMTU", Const, 0, ""},
+ {"SIOCSIFASYNCMAP", Const, 0, ""},
+ {"SIOCSIFBOND", Const, 0, ""},
+ {"SIOCSIFBR", Const, 0, ""},
+ {"SIOCSIFBRDADDR", Const, 0, ""},
+ {"SIOCSIFCAP", Const, 0, ""},
+ {"SIOCSIFDESCR", Const, 0, ""},
+ {"SIOCSIFDSTADDR", Const, 0, ""},
+ {"SIOCSIFENCAP", Const, 0, ""},
+ {"SIOCSIFFIB", Const, 1, ""},
+ {"SIOCSIFFLAGS", Const, 0, ""},
+ {"SIOCSIFGATTR", Const, 1, ""},
+ {"SIOCSIFGENERIC", Const, 0, ""},
+ {"SIOCSIFHWADDR", Const, 0, ""},
+ {"SIOCSIFHWBROADCAST", Const, 0, ""},
+ {"SIOCSIFKPI", Const, 0, ""},
+ {"SIOCSIFLINK", Const, 0, ""},
+ {"SIOCSIFLLADDR", Const, 0, ""},
+ {"SIOCSIFMAC", Const, 0, ""},
+ {"SIOCSIFMAP", Const, 0, ""},
+ {"SIOCSIFMEDIA", Const, 0, ""},
+ {"SIOCSIFMEM", Const, 0, ""},
+ {"SIOCSIFMETRIC", Const, 0, ""},
+ {"SIOCSIFMTU", Const, 0, ""},
+ {"SIOCSIFNAME", Const, 0, ""},
+ {"SIOCSIFNETMASK", Const, 0, ""},
+ {"SIOCSIFPFLAGS", Const, 0, ""},
+ {"SIOCSIFPHYADDR", Const, 0, ""},
+ {"SIOCSIFPHYS", Const, 0, ""},
+ {"SIOCSIFPRIORITY", Const, 1, ""},
+ {"SIOCSIFRDOMAIN", Const, 1, ""},
+ {"SIOCSIFRTLABEL", Const, 1, ""},
+ {"SIOCSIFRVNET", Const, 0, ""},
+ {"SIOCSIFSLAVE", Const, 0, ""},
+ {"SIOCSIFTIMESLOT", Const, 1, ""},
+ {"SIOCSIFTXQLEN", Const, 0, ""},
+ {"SIOCSIFVLAN", Const, 0, ""},
+ {"SIOCSIFVNET", Const, 0, ""},
+ {"SIOCSIFXFLAGS", Const, 1, ""},
+ {"SIOCSLIFPHYADDR", Const, 0, ""},
+ {"SIOCSLIFPHYRTABLE", Const, 1, ""},
+ {"SIOCSLIFPHYTTL", Const, 3, ""},
+ {"SIOCSLINKSTR", Const, 1, ""},
+ {"SIOCSLOWAT", Const, 0, ""},
+ {"SIOCSPGRP", Const, 0, ""},
+ {"SIOCSRARP", Const, 0, ""},
+ {"SIOCSSPPPPARAMS", Const, 3, ""},
+ {"SIOCSVH", Const, 1, ""},
+ {"SIOCSVNETID", Const, 3, ""},
+ {"SIOCZIFDATA", Const, 1, ""},
+ {"SIO_GET_EXTENSION_FUNCTION_POINTER", Const, 1, ""},
+ {"SIO_GET_INTERFACE_LIST", Const, 0, ""},
+ {"SIO_KEEPALIVE_VALS", Const, 3, ""},
+ {"SIO_UDP_CONNRESET", Const, 4, ""},
+ {"SOCK_CLOEXEC", Const, 0, ""},
+ {"SOCK_DCCP", Const, 0, ""},
+ {"SOCK_DGRAM", Const, 0, ""},
+ {"SOCK_FLAGS_MASK", Const, 1, ""},
+ {"SOCK_MAXADDRLEN", Const, 0, ""},
+ {"SOCK_NONBLOCK", Const, 0, ""},
+ {"SOCK_NOSIGPIPE", Const, 1, ""},
+ {"SOCK_PACKET", Const, 0, ""},
+ {"SOCK_RAW", Const, 0, ""},
+ {"SOCK_RDM", Const, 0, ""},
+ {"SOCK_SEQPACKET", Const, 0, ""},
+ {"SOCK_STREAM", Const, 0, ""},
+ {"SOL_AAL", Const, 0, ""},
+ {"SOL_ATM", Const, 0, ""},
+ {"SOL_DECNET", Const, 0, ""},
+ {"SOL_ICMPV6", Const, 0, ""},
+ {"SOL_IP", Const, 0, ""},
+ {"SOL_IPV6", Const, 0, ""},
+ {"SOL_IRDA", Const, 0, ""},
+ {"SOL_PACKET", Const, 0, ""},
+ {"SOL_RAW", Const, 0, ""},
+ {"SOL_SOCKET", Const, 0, ""},
+ {"SOL_TCP", Const, 0, ""},
+ {"SOL_X25", Const, 0, ""},
+ {"SOMAXCONN", Const, 0, ""},
+ {"SO_ACCEPTCONN", Const, 0, ""},
+ {"SO_ACCEPTFILTER", Const, 0, ""},
+ {"SO_ATTACH_FILTER", Const, 0, ""},
+ {"SO_BINDANY", Const, 1, ""},
+ {"SO_BINDTODEVICE", Const, 0, ""},
+ {"SO_BINTIME", Const, 0, ""},
+ {"SO_BROADCAST", Const, 0, ""},
+ {"SO_BSDCOMPAT", Const, 0, ""},
+ {"SO_DEBUG", Const, 0, ""},
+ {"SO_DETACH_FILTER", Const, 0, ""},
+ {"SO_DOMAIN", Const, 0, ""},
+ {"SO_DONTROUTE", Const, 0, ""},
+ {"SO_DONTTRUNC", Const, 0, ""},
+ {"SO_ERROR", Const, 0, ""},
+ {"SO_KEEPALIVE", Const, 0, ""},
+ {"SO_LABEL", Const, 0, ""},
+ {"SO_LINGER", Const, 0, ""},
+ {"SO_LINGER_SEC", Const, 0, ""},
+ {"SO_LISTENINCQLEN", Const, 0, ""},
+ {"SO_LISTENQLEN", Const, 0, ""},
+ {"SO_LISTENQLIMIT", Const, 0, ""},
+ {"SO_MARK", Const, 0, ""},
+ {"SO_NETPROC", Const, 1, ""},
+ {"SO_NKE", Const, 0, ""},
+ {"SO_NOADDRERR", Const, 0, ""},
+ {"SO_NOHEADER", Const, 1, ""},
+ {"SO_NOSIGPIPE", Const, 0, ""},
+ {"SO_NOTIFYCONFLICT", Const, 0, ""},
+ {"SO_NO_CHECK", Const, 0, ""},
+ {"SO_NO_DDP", Const, 0, ""},
+ {"SO_NO_OFFLOAD", Const, 0, ""},
+ {"SO_NP_EXTENSIONS", Const, 0, ""},
+ {"SO_NREAD", Const, 0, ""},
+ {"SO_NUMRCVPKT", Const, 16, ""},
+ {"SO_NWRITE", Const, 0, ""},
+ {"SO_OOBINLINE", Const, 0, ""},
+ {"SO_OVERFLOWED", Const, 1, ""},
+ {"SO_PASSCRED", Const, 0, ""},
+ {"SO_PASSSEC", Const, 0, ""},
+ {"SO_PEERCRED", Const, 0, ""},
+ {"SO_PEERLABEL", Const, 0, ""},
+ {"SO_PEERNAME", Const, 0, ""},
+ {"SO_PEERSEC", Const, 0, ""},
+ {"SO_PRIORITY", Const, 0, ""},
+ {"SO_PROTOCOL", Const, 0, ""},
+ {"SO_PROTOTYPE", Const, 1, ""},
+ {"SO_RANDOMPORT", Const, 0, ""},
+ {"SO_RCVBUF", Const, 0, ""},
+ {"SO_RCVBUFFORCE", Const, 0, ""},
+ {"SO_RCVLOWAT", Const, 0, ""},
+ {"SO_RCVTIMEO", Const, 0, ""},
+ {"SO_RESTRICTIONS", Const, 0, ""},
+ {"SO_RESTRICT_DENYIN", Const, 0, ""},
+ {"SO_RESTRICT_DENYOUT", Const, 0, ""},
+ {"SO_RESTRICT_DENYSET", Const, 0, ""},
+ {"SO_REUSEADDR", Const, 0, ""},
+ {"SO_REUSEPORT", Const, 0, ""},
+ {"SO_REUSESHAREUID", Const, 0, ""},
+ {"SO_RTABLE", Const, 1, ""},
+ {"SO_RXQ_OVFL", Const, 0, ""},
+ {"SO_SECURITY_AUTHENTICATION", Const, 0, ""},
+ {"SO_SECURITY_ENCRYPTION_NETWORK", Const, 0, ""},
+ {"SO_SECURITY_ENCRYPTION_TRANSPORT", Const, 0, ""},
+ {"SO_SETFIB", Const, 0, ""},
+ {"SO_SNDBUF", Const, 0, ""},
+ {"SO_SNDBUFFORCE", Const, 0, ""},
+ {"SO_SNDLOWAT", Const, 0, ""},
+ {"SO_SNDTIMEO", Const, 0, ""},
+ {"SO_SPLICE", Const, 1, ""},
+ {"SO_TIMESTAMP", Const, 0, ""},
+ {"SO_TIMESTAMPING", Const, 0, ""},
+ {"SO_TIMESTAMPNS", Const, 0, ""},
+ {"SO_TIMESTAMP_MONOTONIC", Const, 0, ""},
+ {"SO_TYPE", Const, 0, ""},
+ {"SO_UPCALLCLOSEWAIT", Const, 0, ""},
+ {"SO_UPDATE_ACCEPT_CONTEXT", Const, 0, ""},
+ {"SO_UPDATE_CONNECT_CONTEXT", Const, 1, ""},
+ {"SO_USELOOPBACK", Const, 0, ""},
+ {"SO_USER_COOKIE", Const, 1, ""},
+ {"SO_VENDOR", Const, 3, ""},
+ {"SO_WANTMORE", Const, 0, ""},
+ {"SO_WANTOOBFLAG", Const, 0, ""},
+ {"SSLExtraCertChainPolicyPara", Type, 0, ""},
+ {"SSLExtraCertChainPolicyPara.AuthType", Field, 0, ""},
+ {"SSLExtraCertChainPolicyPara.Checks", Field, 0, ""},
+ {"SSLExtraCertChainPolicyPara.ServerName", Field, 0, ""},
+ {"SSLExtraCertChainPolicyPara.Size", Field, 0, ""},
+ {"STANDARD_RIGHTS_ALL", Const, 0, ""},
+ {"STANDARD_RIGHTS_EXECUTE", Const, 0, ""},
+ {"STANDARD_RIGHTS_READ", Const, 0, ""},
+ {"STANDARD_RIGHTS_REQUIRED", Const, 0, ""},
+ {"STANDARD_RIGHTS_WRITE", Const, 0, ""},
+ {"STARTF_USESHOWWINDOW", Const, 0, ""},
+ {"STARTF_USESTDHANDLES", Const, 0, ""},
+ {"STD_ERROR_HANDLE", Const, 0, ""},
+ {"STD_INPUT_HANDLE", Const, 0, ""},
+ {"STD_OUTPUT_HANDLE", Const, 0, ""},
+ {"SUBLANG_ENGLISH_US", Const, 0, ""},
+ {"SW_FORCEMINIMIZE", Const, 0, ""},
+ {"SW_HIDE", Const, 0, ""},
+ {"SW_MAXIMIZE", Const, 0, ""},
+ {"SW_MINIMIZE", Const, 0, ""},
+ {"SW_NORMAL", Const, 0, ""},
+ {"SW_RESTORE", Const, 0, ""},
+ {"SW_SHOW", Const, 0, ""},
+ {"SW_SHOWDEFAULT", Const, 0, ""},
+ {"SW_SHOWMAXIMIZED", Const, 0, ""},
+ {"SW_SHOWMINIMIZED", Const, 0, ""},
+ {"SW_SHOWMINNOACTIVE", Const, 0, ""},
+ {"SW_SHOWNA", Const, 0, ""},
+ {"SW_SHOWNOACTIVATE", Const, 0, ""},
+ {"SW_SHOWNORMAL", Const, 0, ""},
+ {"SYMBOLIC_LINK_FLAG_DIRECTORY", Const, 4, ""},
+ {"SYNCHRONIZE", Const, 0, ""},
+ {"SYSCTL_VERSION", Const, 1, ""},
+ {"SYSCTL_VERS_0", Const, 1, ""},
+ {"SYSCTL_VERS_1", Const, 1, ""},
+ {"SYSCTL_VERS_MASK", Const, 1, ""},
+ {"SYS_ABORT2", Const, 0, ""},
+ {"SYS_ACCEPT", Const, 0, ""},
+ {"SYS_ACCEPT4", Const, 0, ""},
+ {"SYS_ACCEPT_NOCANCEL", Const, 0, ""},
+ {"SYS_ACCESS", Const, 0, ""},
+ {"SYS_ACCESS_EXTENDED", Const, 0, ""},
+ {"SYS_ACCT", Const, 0, ""},
+ {"SYS_ADD_KEY", Const, 0, ""},
+ {"SYS_ADD_PROFIL", Const, 0, ""},
+ {"SYS_ADJFREQ", Const, 1, ""},
+ {"SYS_ADJTIME", Const, 0, ""},
+ {"SYS_ADJTIMEX", Const, 0, ""},
+ {"SYS_AFS_SYSCALL", Const, 0, ""},
+ {"SYS_AIO_CANCEL", Const, 0, ""},
+ {"SYS_AIO_ERROR", Const, 0, ""},
+ {"SYS_AIO_FSYNC", Const, 0, ""},
+ {"SYS_AIO_MLOCK", Const, 14, ""},
+ {"SYS_AIO_READ", Const, 0, ""},
+ {"SYS_AIO_RETURN", Const, 0, ""},
+ {"SYS_AIO_SUSPEND", Const, 0, ""},
+ {"SYS_AIO_SUSPEND_NOCANCEL", Const, 0, ""},
+ {"SYS_AIO_WAITCOMPLETE", Const, 14, ""},
+ {"SYS_AIO_WRITE", Const, 0, ""},
+ {"SYS_ALARM", Const, 0, ""},
+ {"SYS_ARCH_PRCTL", Const, 0, ""},
+ {"SYS_ARM_FADVISE64_64", Const, 0, ""},
+ {"SYS_ARM_SYNC_FILE_RANGE", Const, 0, ""},
+ {"SYS_ATGETMSG", Const, 0, ""},
+ {"SYS_ATPGETREQ", Const, 0, ""},
+ {"SYS_ATPGETRSP", Const, 0, ""},
+ {"SYS_ATPSNDREQ", Const, 0, ""},
+ {"SYS_ATPSNDRSP", Const, 0, ""},
+ {"SYS_ATPUTMSG", Const, 0, ""},
+ {"SYS_ATSOCKET", Const, 0, ""},
+ {"SYS_AUDIT", Const, 0, ""},
+ {"SYS_AUDITCTL", Const, 0, ""},
+ {"SYS_AUDITON", Const, 0, ""},
+ {"SYS_AUDIT_SESSION_JOIN", Const, 0, ""},
+ {"SYS_AUDIT_SESSION_PORT", Const, 0, ""},
+ {"SYS_AUDIT_SESSION_SELF", Const, 0, ""},
+ {"SYS_BDFLUSH", Const, 0, ""},
+ {"SYS_BIND", Const, 0, ""},
+ {"SYS_BINDAT", Const, 3, ""},
+ {"SYS_BREAK", Const, 0, ""},
+ {"SYS_BRK", Const, 0, ""},
+ {"SYS_BSDTHREAD_CREATE", Const, 0, ""},
+ {"SYS_BSDTHREAD_REGISTER", Const, 0, ""},
+ {"SYS_BSDTHREAD_TERMINATE", Const, 0, ""},
+ {"SYS_CAPGET", Const, 0, ""},
+ {"SYS_CAPSET", Const, 0, ""},
+ {"SYS_CAP_ENTER", Const, 0, ""},
+ {"SYS_CAP_FCNTLS_GET", Const, 1, ""},
+ {"SYS_CAP_FCNTLS_LIMIT", Const, 1, ""},
+ {"SYS_CAP_GETMODE", Const, 0, ""},
+ {"SYS_CAP_GETRIGHTS", Const, 0, ""},
+ {"SYS_CAP_IOCTLS_GET", Const, 1, ""},
+ {"SYS_CAP_IOCTLS_LIMIT", Const, 1, ""},
+ {"SYS_CAP_NEW", Const, 0, ""},
+ {"SYS_CAP_RIGHTS_GET", Const, 1, ""},
+ {"SYS_CAP_RIGHTS_LIMIT", Const, 1, ""},
+ {"SYS_CHDIR", Const, 0, ""},
+ {"SYS_CHFLAGS", Const, 0, ""},
+ {"SYS_CHFLAGSAT", Const, 3, ""},
+ {"SYS_CHMOD", Const, 0, ""},
+ {"SYS_CHMOD_EXTENDED", Const, 0, ""},
+ {"SYS_CHOWN", Const, 0, ""},
+ {"SYS_CHOWN32", Const, 0, ""},
+ {"SYS_CHROOT", Const, 0, ""},
+ {"SYS_CHUD", Const, 0, ""},
+ {"SYS_CLOCK_ADJTIME", Const, 0, ""},
+ {"SYS_CLOCK_GETCPUCLOCKID2", Const, 1, ""},
+ {"SYS_CLOCK_GETRES", Const, 0, ""},
+ {"SYS_CLOCK_GETTIME", Const, 0, ""},
+ {"SYS_CLOCK_NANOSLEEP", Const, 0, ""},
+ {"SYS_CLOCK_SETTIME", Const, 0, ""},
+ {"SYS_CLONE", Const, 0, ""},
+ {"SYS_CLOSE", Const, 0, ""},
+ {"SYS_CLOSEFROM", Const, 0, ""},
+ {"SYS_CLOSE_NOCANCEL", Const, 0, ""},
+ {"SYS_CONNECT", Const, 0, ""},
+ {"SYS_CONNECTAT", Const, 3, ""},
+ {"SYS_CONNECT_NOCANCEL", Const, 0, ""},
+ {"SYS_COPYFILE", Const, 0, ""},
+ {"SYS_CPUSET", Const, 0, ""},
+ {"SYS_CPUSET_GETAFFINITY", Const, 0, ""},
+ {"SYS_CPUSET_GETID", Const, 0, ""},
+ {"SYS_CPUSET_SETAFFINITY", Const, 0, ""},
+ {"SYS_CPUSET_SETID", Const, 0, ""},
+ {"SYS_CREAT", Const, 0, ""},
+ {"SYS_CREATE_MODULE", Const, 0, ""},
+ {"SYS_CSOPS", Const, 0, ""},
+ {"SYS_CSOPS_AUDITTOKEN", Const, 16, ""},
+ {"SYS_DELETE", Const, 0, ""},
+ {"SYS_DELETE_MODULE", Const, 0, ""},
+ {"SYS_DUP", Const, 0, ""},
+ {"SYS_DUP2", Const, 0, ""},
+ {"SYS_DUP3", Const, 0, ""},
+ {"SYS_EACCESS", Const, 0, ""},
+ {"SYS_EPOLL_CREATE", Const, 0, ""},
+ {"SYS_EPOLL_CREATE1", Const, 0, ""},
+ {"SYS_EPOLL_CTL", Const, 0, ""},
+ {"SYS_EPOLL_CTL_OLD", Const, 0, ""},
+ {"SYS_EPOLL_PWAIT", Const, 0, ""},
+ {"SYS_EPOLL_WAIT", Const, 0, ""},
+ {"SYS_EPOLL_WAIT_OLD", Const, 0, ""},
+ {"SYS_EVENTFD", Const, 0, ""},
+ {"SYS_EVENTFD2", Const, 0, ""},
+ {"SYS_EXCHANGEDATA", Const, 0, ""},
+ {"SYS_EXECVE", Const, 0, ""},
+ {"SYS_EXIT", Const, 0, ""},
+ {"SYS_EXIT_GROUP", Const, 0, ""},
+ {"SYS_EXTATTRCTL", Const, 0, ""},
+ {"SYS_EXTATTR_DELETE_FD", Const, 0, ""},
+ {"SYS_EXTATTR_DELETE_FILE", Const, 0, ""},
+ {"SYS_EXTATTR_DELETE_LINK", Const, 0, ""},
+ {"SYS_EXTATTR_GET_FD", Const, 0, ""},
+ {"SYS_EXTATTR_GET_FILE", Const, 0, ""},
+ {"SYS_EXTATTR_GET_LINK", Const, 0, ""},
+ {"SYS_EXTATTR_LIST_FD", Const, 0, ""},
+ {"SYS_EXTATTR_LIST_FILE", Const, 0, ""},
+ {"SYS_EXTATTR_LIST_LINK", Const, 0, ""},
+ {"SYS_EXTATTR_SET_FD", Const, 0, ""},
+ {"SYS_EXTATTR_SET_FILE", Const, 0, ""},
+ {"SYS_EXTATTR_SET_LINK", Const, 0, ""},
+ {"SYS_FACCESSAT", Const, 0, ""},
+ {"SYS_FADVISE64", Const, 0, ""},
+ {"SYS_FADVISE64_64", Const, 0, ""},
+ {"SYS_FALLOCATE", Const, 0, ""},
+ {"SYS_FANOTIFY_INIT", Const, 0, ""},
+ {"SYS_FANOTIFY_MARK", Const, 0, ""},
+ {"SYS_FCHDIR", Const, 0, ""},
+ {"SYS_FCHFLAGS", Const, 0, ""},
+ {"SYS_FCHMOD", Const, 0, ""},
+ {"SYS_FCHMODAT", Const, 0, ""},
+ {"SYS_FCHMOD_EXTENDED", Const, 0, ""},
+ {"SYS_FCHOWN", Const, 0, ""},
+ {"SYS_FCHOWN32", Const, 0, ""},
+ {"SYS_FCHOWNAT", Const, 0, ""},
+ {"SYS_FCHROOT", Const, 1, ""},
+ {"SYS_FCNTL", Const, 0, ""},
+ {"SYS_FCNTL64", Const, 0, ""},
+ {"SYS_FCNTL_NOCANCEL", Const, 0, ""},
+ {"SYS_FDATASYNC", Const, 0, ""},
+ {"SYS_FEXECVE", Const, 0, ""},
+ {"SYS_FFCLOCK_GETCOUNTER", Const, 0, ""},
+ {"SYS_FFCLOCK_GETESTIMATE", Const, 0, ""},
+ {"SYS_FFCLOCK_SETESTIMATE", Const, 0, ""},
+ {"SYS_FFSCTL", Const, 0, ""},
+ {"SYS_FGETATTRLIST", Const, 0, ""},
+ {"SYS_FGETXATTR", Const, 0, ""},
+ {"SYS_FHOPEN", Const, 0, ""},
+ {"SYS_FHSTAT", Const, 0, ""},
+ {"SYS_FHSTATFS", Const, 0, ""},
+ {"SYS_FILEPORT_MAKEFD", Const, 0, ""},
+ {"SYS_FILEPORT_MAKEPORT", Const, 0, ""},
+ {"SYS_FKTRACE", Const, 1, ""},
+ {"SYS_FLISTXATTR", Const, 0, ""},
+ {"SYS_FLOCK", Const, 0, ""},
+ {"SYS_FORK", Const, 0, ""},
+ {"SYS_FPATHCONF", Const, 0, ""},
+ {"SYS_FREEBSD6_FTRUNCATE", Const, 0, ""},
+ {"SYS_FREEBSD6_LSEEK", Const, 0, ""},
+ {"SYS_FREEBSD6_MMAP", Const, 0, ""},
+ {"SYS_FREEBSD6_PREAD", Const, 0, ""},
+ {"SYS_FREEBSD6_PWRITE", Const, 0, ""},
+ {"SYS_FREEBSD6_TRUNCATE", Const, 0, ""},
+ {"SYS_FREMOVEXATTR", Const, 0, ""},
+ {"SYS_FSCTL", Const, 0, ""},
+ {"SYS_FSETATTRLIST", Const, 0, ""},
+ {"SYS_FSETXATTR", Const, 0, ""},
+ {"SYS_FSGETPATH", Const, 0, ""},
+ {"SYS_FSTAT", Const, 0, ""},
+ {"SYS_FSTAT64", Const, 0, ""},
+ {"SYS_FSTAT64_EXTENDED", Const, 0, ""},
+ {"SYS_FSTATAT", Const, 0, ""},
+ {"SYS_FSTATAT64", Const, 0, ""},
+ {"SYS_FSTATFS", Const, 0, ""},
+ {"SYS_FSTATFS64", Const, 0, ""},
+ {"SYS_FSTATV", Const, 0, ""},
+ {"SYS_FSTATVFS1", Const, 1, ""},
+ {"SYS_FSTAT_EXTENDED", Const, 0, ""},
+ {"SYS_FSYNC", Const, 0, ""},
+ {"SYS_FSYNC_NOCANCEL", Const, 0, ""},
+ {"SYS_FSYNC_RANGE", Const, 1, ""},
+ {"SYS_FTIME", Const, 0, ""},
+ {"SYS_FTRUNCATE", Const, 0, ""},
+ {"SYS_FTRUNCATE64", Const, 0, ""},
+ {"SYS_FUTEX", Const, 0, ""},
+ {"SYS_FUTIMENS", Const, 1, ""},
+ {"SYS_FUTIMES", Const, 0, ""},
+ {"SYS_FUTIMESAT", Const, 0, ""},
+ {"SYS_GETATTRLIST", Const, 0, ""},
+ {"SYS_GETAUDIT", Const, 0, ""},
+ {"SYS_GETAUDIT_ADDR", Const, 0, ""},
+ {"SYS_GETAUID", Const, 0, ""},
+ {"SYS_GETCONTEXT", Const, 0, ""},
+ {"SYS_GETCPU", Const, 0, ""},
+ {"SYS_GETCWD", Const, 0, ""},
+ {"SYS_GETDENTS", Const, 0, ""},
+ {"SYS_GETDENTS64", Const, 0, ""},
+ {"SYS_GETDIRENTRIES", Const, 0, ""},
+ {"SYS_GETDIRENTRIES64", Const, 0, ""},
+ {"SYS_GETDIRENTRIESATTR", Const, 0, ""},
+ {"SYS_GETDTABLECOUNT", Const, 1, ""},
+ {"SYS_GETDTABLESIZE", Const, 0, ""},
+ {"SYS_GETEGID", Const, 0, ""},
+ {"SYS_GETEGID32", Const, 0, ""},
+ {"SYS_GETEUID", Const, 0, ""},
+ {"SYS_GETEUID32", Const, 0, ""},
+ {"SYS_GETFH", Const, 0, ""},
+ {"SYS_GETFSSTAT", Const, 0, ""},
+ {"SYS_GETFSSTAT64", Const, 0, ""},
+ {"SYS_GETGID", Const, 0, ""},
+ {"SYS_GETGID32", Const, 0, ""},
+ {"SYS_GETGROUPS", Const, 0, ""},
+ {"SYS_GETGROUPS32", Const, 0, ""},
+ {"SYS_GETHOSTUUID", Const, 0, ""},
+ {"SYS_GETITIMER", Const, 0, ""},
+ {"SYS_GETLCID", Const, 0, ""},
+ {"SYS_GETLOGIN", Const, 0, ""},
+ {"SYS_GETLOGINCLASS", Const, 0, ""},
+ {"SYS_GETPEERNAME", Const, 0, ""},
+ {"SYS_GETPGID", Const, 0, ""},
+ {"SYS_GETPGRP", Const, 0, ""},
+ {"SYS_GETPID", Const, 0, ""},
+ {"SYS_GETPMSG", Const, 0, ""},
+ {"SYS_GETPPID", Const, 0, ""},
+ {"SYS_GETPRIORITY", Const, 0, ""},
+ {"SYS_GETRESGID", Const, 0, ""},
+ {"SYS_GETRESGID32", Const, 0, ""},
+ {"SYS_GETRESUID", Const, 0, ""},
+ {"SYS_GETRESUID32", Const, 0, ""},
+ {"SYS_GETRLIMIT", Const, 0, ""},
+ {"SYS_GETRTABLE", Const, 1, ""},
+ {"SYS_GETRUSAGE", Const, 0, ""},
+ {"SYS_GETSGROUPS", Const, 0, ""},
+ {"SYS_GETSID", Const, 0, ""},
+ {"SYS_GETSOCKNAME", Const, 0, ""},
+ {"SYS_GETSOCKOPT", Const, 0, ""},
+ {"SYS_GETTHRID", Const, 1, ""},
+ {"SYS_GETTID", Const, 0, ""},
+ {"SYS_GETTIMEOFDAY", Const, 0, ""},
+ {"SYS_GETUID", Const, 0, ""},
+ {"SYS_GETUID32", Const, 0, ""},
+ {"SYS_GETVFSSTAT", Const, 1, ""},
+ {"SYS_GETWGROUPS", Const, 0, ""},
+ {"SYS_GETXATTR", Const, 0, ""},
+ {"SYS_GET_KERNEL_SYMS", Const, 0, ""},
+ {"SYS_GET_MEMPOLICY", Const, 0, ""},
+ {"SYS_GET_ROBUST_LIST", Const, 0, ""},
+ {"SYS_GET_THREAD_AREA", Const, 0, ""},
+ {"SYS_GSSD_SYSCALL", Const, 14, ""},
+ {"SYS_GTTY", Const, 0, ""},
+ {"SYS_IDENTITYSVC", Const, 0, ""},
+ {"SYS_IDLE", Const, 0, ""},
+ {"SYS_INITGROUPS", Const, 0, ""},
+ {"SYS_INIT_MODULE", Const, 0, ""},
+ {"SYS_INOTIFY_ADD_WATCH", Const, 0, ""},
+ {"SYS_INOTIFY_INIT", Const, 0, ""},
+ {"SYS_INOTIFY_INIT1", Const, 0, ""},
+ {"SYS_INOTIFY_RM_WATCH", Const, 0, ""},
+ {"SYS_IOCTL", Const, 0, ""},
+ {"SYS_IOPERM", Const, 0, ""},
+ {"SYS_IOPL", Const, 0, ""},
+ {"SYS_IOPOLICYSYS", Const, 0, ""},
+ {"SYS_IOPRIO_GET", Const, 0, ""},
+ {"SYS_IOPRIO_SET", Const, 0, ""},
+ {"SYS_IO_CANCEL", Const, 0, ""},
+ {"SYS_IO_DESTROY", Const, 0, ""},
+ {"SYS_IO_GETEVENTS", Const, 0, ""},
+ {"SYS_IO_SETUP", Const, 0, ""},
+ {"SYS_IO_SUBMIT", Const, 0, ""},
+ {"SYS_IPC", Const, 0, ""},
+ {"SYS_ISSETUGID", Const, 0, ""},
+ {"SYS_JAIL", Const, 0, ""},
+ {"SYS_JAIL_ATTACH", Const, 0, ""},
+ {"SYS_JAIL_GET", Const, 0, ""},
+ {"SYS_JAIL_REMOVE", Const, 0, ""},
+ {"SYS_JAIL_SET", Const, 0, ""},
+ {"SYS_KAS_INFO", Const, 16, ""},
+ {"SYS_KDEBUG_TRACE", Const, 0, ""},
+ {"SYS_KENV", Const, 0, ""},
+ {"SYS_KEVENT", Const, 0, ""},
+ {"SYS_KEVENT64", Const, 0, ""},
+ {"SYS_KEXEC_LOAD", Const, 0, ""},
+ {"SYS_KEYCTL", Const, 0, ""},
+ {"SYS_KILL", Const, 0, ""},
+ {"SYS_KLDFIND", Const, 0, ""},
+ {"SYS_KLDFIRSTMOD", Const, 0, ""},
+ {"SYS_KLDLOAD", Const, 0, ""},
+ {"SYS_KLDNEXT", Const, 0, ""},
+ {"SYS_KLDSTAT", Const, 0, ""},
+ {"SYS_KLDSYM", Const, 0, ""},
+ {"SYS_KLDUNLOAD", Const, 0, ""},
+ {"SYS_KLDUNLOADF", Const, 0, ""},
+ {"SYS_KMQ_NOTIFY", Const, 14, ""},
+ {"SYS_KMQ_OPEN", Const, 14, ""},
+ {"SYS_KMQ_SETATTR", Const, 14, ""},
+ {"SYS_KMQ_TIMEDRECEIVE", Const, 14, ""},
+ {"SYS_KMQ_TIMEDSEND", Const, 14, ""},
+ {"SYS_KMQ_UNLINK", Const, 14, ""},
+ {"SYS_KQUEUE", Const, 0, ""},
+ {"SYS_KQUEUE1", Const, 1, ""},
+ {"SYS_KSEM_CLOSE", Const, 14, ""},
+ {"SYS_KSEM_DESTROY", Const, 14, ""},
+ {"SYS_KSEM_GETVALUE", Const, 14, ""},
+ {"SYS_KSEM_INIT", Const, 14, ""},
+ {"SYS_KSEM_OPEN", Const, 14, ""},
+ {"SYS_KSEM_POST", Const, 14, ""},
+ {"SYS_KSEM_TIMEDWAIT", Const, 14, ""},
+ {"SYS_KSEM_TRYWAIT", Const, 14, ""},
+ {"SYS_KSEM_UNLINK", Const, 14, ""},
+ {"SYS_KSEM_WAIT", Const, 14, ""},
+ {"SYS_KTIMER_CREATE", Const, 0, ""},
+ {"SYS_KTIMER_DELETE", Const, 0, ""},
+ {"SYS_KTIMER_GETOVERRUN", Const, 0, ""},
+ {"SYS_KTIMER_GETTIME", Const, 0, ""},
+ {"SYS_KTIMER_SETTIME", Const, 0, ""},
+ {"SYS_KTRACE", Const, 0, ""},
+ {"SYS_LCHFLAGS", Const, 0, ""},
+ {"SYS_LCHMOD", Const, 0, ""},
+ {"SYS_LCHOWN", Const, 0, ""},
+ {"SYS_LCHOWN32", Const, 0, ""},
+ {"SYS_LEDGER", Const, 16, ""},
+ {"SYS_LGETFH", Const, 0, ""},
+ {"SYS_LGETXATTR", Const, 0, ""},
+ {"SYS_LINK", Const, 0, ""},
+ {"SYS_LINKAT", Const, 0, ""},
+ {"SYS_LIO_LISTIO", Const, 0, ""},
+ {"SYS_LISTEN", Const, 0, ""},
+ {"SYS_LISTXATTR", Const, 0, ""},
+ {"SYS_LLISTXATTR", Const, 0, ""},
+ {"SYS_LOCK", Const, 0, ""},
+ {"SYS_LOOKUP_DCOOKIE", Const, 0, ""},
+ {"SYS_LPATHCONF", Const, 0, ""},
+ {"SYS_LREMOVEXATTR", Const, 0, ""},
+ {"SYS_LSEEK", Const, 0, ""},
+ {"SYS_LSETXATTR", Const, 0, ""},
+ {"SYS_LSTAT", Const, 0, ""},
+ {"SYS_LSTAT64", Const, 0, ""},
+ {"SYS_LSTAT64_EXTENDED", Const, 0, ""},
+ {"SYS_LSTATV", Const, 0, ""},
+ {"SYS_LSTAT_EXTENDED", Const, 0, ""},
+ {"SYS_LUTIMES", Const, 0, ""},
+ {"SYS_MAC_SYSCALL", Const, 0, ""},
+ {"SYS_MADVISE", Const, 0, ""},
+ {"SYS_MADVISE1", Const, 0, ""},
+ {"SYS_MAXSYSCALL", Const, 0, ""},
+ {"SYS_MBIND", Const, 0, ""},
+ {"SYS_MIGRATE_PAGES", Const, 0, ""},
+ {"SYS_MINCORE", Const, 0, ""},
+ {"SYS_MINHERIT", Const, 0, ""},
+ {"SYS_MKCOMPLEX", Const, 0, ""},
+ {"SYS_MKDIR", Const, 0, ""},
+ {"SYS_MKDIRAT", Const, 0, ""},
+ {"SYS_MKDIR_EXTENDED", Const, 0, ""},
+ {"SYS_MKFIFO", Const, 0, ""},
+ {"SYS_MKFIFOAT", Const, 0, ""},
+ {"SYS_MKFIFO_EXTENDED", Const, 0, ""},
+ {"SYS_MKNOD", Const, 0, ""},
+ {"SYS_MKNODAT", Const, 0, ""},
+ {"SYS_MLOCK", Const, 0, ""},
+ {"SYS_MLOCKALL", Const, 0, ""},
+ {"SYS_MMAP", Const, 0, ""},
+ {"SYS_MMAP2", Const, 0, ""},
+ {"SYS_MODCTL", Const, 1, ""},
+ {"SYS_MODFIND", Const, 0, ""},
+ {"SYS_MODFNEXT", Const, 0, ""},
+ {"SYS_MODIFY_LDT", Const, 0, ""},
+ {"SYS_MODNEXT", Const, 0, ""},
+ {"SYS_MODSTAT", Const, 0, ""},
+ {"SYS_MODWATCH", Const, 0, ""},
+ {"SYS_MOUNT", Const, 0, ""},
+ {"SYS_MOVE_PAGES", Const, 0, ""},
+ {"SYS_MPROTECT", Const, 0, ""},
+ {"SYS_MPX", Const, 0, ""},
+ {"SYS_MQUERY", Const, 1, ""},
+ {"SYS_MQ_GETSETATTR", Const, 0, ""},
+ {"SYS_MQ_NOTIFY", Const, 0, ""},
+ {"SYS_MQ_OPEN", Const, 0, ""},
+ {"SYS_MQ_TIMEDRECEIVE", Const, 0, ""},
+ {"SYS_MQ_TIMEDSEND", Const, 0, ""},
+ {"SYS_MQ_UNLINK", Const, 0, ""},
+ {"SYS_MREMAP", Const, 0, ""},
+ {"SYS_MSGCTL", Const, 0, ""},
+ {"SYS_MSGGET", Const, 0, ""},
+ {"SYS_MSGRCV", Const, 0, ""},
+ {"SYS_MSGRCV_NOCANCEL", Const, 0, ""},
+ {"SYS_MSGSND", Const, 0, ""},
+ {"SYS_MSGSND_NOCANCEL", Const, 0, ""},
+ {"SYS_MSGSYS", Const, 0, ""},
+ {"SYS_MSYNC", Const, 0, ""},
+ {"SYS_MSYNC_NOCANCEL", Const, 0, ""},
+ {"SYS_MUNLOCK", Const, 0, ""},
+ {"SYS_MUNLOCKALL", Const, 0, ""},
+ {"SYS_MUNMAP", Const, 0, ""},
+ {"SYS_NAME_TO_HANDLE_AT", Const, 0, ""},
+ {"SYS_NANOSLEEP", Const, 0, ""},
+ {"SYS_NEWFSTATAT", Const, 0, ""},
+ {"SYS_NFSCLNT", Const, 0, ""},
+ {"SYS_NFSSERVCTL", Const, 0, ""},
+ {"SYS_NFSSVC", Const, 0, ""},
+ {"SYS_NFSTAT", Const, 0, ""},
+ {"SYS_NICE", Const, 0, ""},
+ {"SYS_NLM_SYSCALL", Const, 14, ""},
+ {"SYS_NLSTAT", Const, 0, ""},
+ {"SYS_NMOUNT", Const, 0, ""},
+ {"SYS_NSTAT", Const, 0, ""},
+ {"SYS_NTP_ADJTIME", Const, 0, ""},
+ {"SYS_NTP_GETTIME", Const, 0, ""},
+ {"SYS_NUMA_GETAFFINITY", Const, 14, ""},
+ {"SYS_NUMA_SETAFFINITY", Const, 14, ""},
+ {"SYS_OABI_SYSCALL_BASE", Const, 0, ""},
+ {"SYS_OBREAK", Const, 0, ""},
+ {"SYS_OLDFSTAT", Const, 0, ""},
+ {"SYS_OLDLSTAT", Const, 0, ""},
+ {"SYS_OLDOLDUNAME", Const, 0, ""},
+ {"SYS_OLDSTAT", Const, 0, ""},
+ {"SYS_OLDUNAME", Const, 0, ""},
+ {"SYS_OPEN", Const, 0, ""},
+ {"SYS_OPENAT", Const, 0, ""},
+ {"SYS_OPENBSD_POLL", Const, 0, ""},
+ {"SYS_OPEN_BY_HANDLE_AT", Const, 0, ""},
+ {"SYS_OPEN_DPROTECTED_NP", Const, 16, ""},
+ {"SYS_OPEN_EXTENDED", Const, 0, ""},
+ {"SYS_OPEN_NOCANCEL", Const, 0, ""},
+ {"SYS_OVADVISE", Const, 0, ""},
+ {"SYS_PACCEPT", Const, 1, ""},
+ {"SYS_PATHCONF", Const, 0, ""},
+ {"SYS_PAUSE", Const, 0, ""},
+ {"SYS_PCICONFIG_IOBASE", Const, 0, ""},
+ {"SYS_PCICONFIG_READ", Const, 0, ""},
+ {"SYS_PCICONFIG_WRITE", Const, 0, ""},
+ {"SYS_PDFORK", Const, 0, ""},
+ {"SYS_PDGETPID", Const, 0, ""},
+ {"SYS_PDKILL", Const, 0, ""},
+ {"SYS_PERF_EVENT_OPEN", Const, 0, ""},
+ {"SYS_PERSONALITY", Const, 0, ""},
+ {"SYS_PID_HIBERNATE", Const, 0, ""},
+ {"SYS_PID_RESUME", Const, 0, ""},
+ {"SYS_PID_SHUTDOWN_SOCKETS", Const, 0, ""},
+ {"SYS_PID_SUSPEND", Const, 0, ""},
+ {"SYS_PIPE", Const, 0, ""},
+ {"SYS_PIPE2", Const, 0, ""},
+ {"SYS_PIVOT_ROOT", Const, 0, ""},
+ {"SYS_PMC_CONTROL", Const, 1, ""},
+ {"SYS_PMC_GET_INFO", Const, 1, ""},
+ {"SYS_POLL", Const, 0, ""},
+ {"SYS_POLLTS", Const, 1, ""},
+ {"SYS_POLL_NOCANCEL", Const, 0, ""},
+ {"SYS_POSIX_FADVISE", Const, 0, ""},
+ {"SYS_POSIX_FALLOCATE", Const, 0, ""},
+ {"SYS_POSIX_OPENPT", Const, 0, ""},
+ {"SYS_POSIX_SPAWN", Const, 0, ""},
+ {"SYS_PPOLL", Const, 0, ""},
+ {"SYS_PRCTL", Const, 0, ""},
+ {"SYS_PREAD", Const, 0, ""},
+ {"SYS_PREAD64", Const, 0, ""},
+ {"SYS_PREADV", Const, 0, ""},
+ {"SYS_PREAD_NOCANCEL", Const, 0, ""},
+ {"SYS_PRLIMIT64", Const, 0, ""},
+ {"SYS_PROCCTL", Const, 3, ""},
+ {"SYS_PROCESS_POLICY", Const, 0, ""},
+ {"SYS_PROCESS_VM_READV", Const, 0, ""},
+ {"SYS_PROCESS_VM_WRITEV", Const, 0, ""},
+ {"SYS_PROC_INFO", Const, 0, ""},
+ {"SYS_PROF", Const, 0, ""},
+ {"SYS_PROFIL", Const, 0, ""},
+ {"SYS_PSELECT", Const, 0, ""},
+ {"SYS_PSELECT6", Const, 0, ""},
+ {"SYS_PSET_ASSIGN", Const, 1, ""},
+ {"SYS_PSET_CREATE", Const, 1, ""},
+ {"SYS_PSET_DESTROY", Const, 1, ""},
+ {"SYS_PSYNCH_CVBROAD", Const, 0, ""},
+ {"SYS_PSYNCH_CVCLRPREPOST", Const, 0, ""},
+ {"SYS_PSYNCH_CVSIGNAL", Const, 0, ""},
+ {"SYS_PSYNCH_CVWAIT", Const, 0, ""},
+ {"SYS_PSYNCH_MUTEXDROP", Const, 0, ""},
+ {"SYS_PSYNCH_MUTEXWAIT", Const, 0, ""},
+ {"SYS_PSYNCH_RW_DOWNGRADE", Const, 0, ""},
+ {"SYS_PSYNCH_RW_LONGRDLOCK", Const, 0, ""},
+ {"SYS_PSYNCH_RW_RDLOCK", Const, 0, ""},
+ {"SYS_PSYNCH_RW_UNLOCK", Const, 0, ""},
+ {"SYS_PSYNCH_RW_UNLOCK2", Const, 0, ""},
+ {"SYS_PSYNCH_RW_UPGRADE", Const, 0, ""},
+ {"SYS_PSYNCH_RW_WRLOCK", Const, 0, ""},
+ {"SYS_PSYNCH_RW_YIELDWRLOCK", Const, 0, ""},
+ {"SYS_PTRACE", Const, 0, ""},
+ {"SYS_PUTPMSG", Const, 0, ""},
+ {"SYS_PWRITE", Const, 0, ""},
+ {"SYS_PWRITE64", Const, 0, ""},
+ {"SYS_PWRITEV", Const, 0, ""},
+ {"SYS_PWRITE_NOCANCEL", Const, 0, ""},
+ {"SYS_QUERY_MODULE", Const, 0, ""},
+ {"SYS_QUOTACTL", Const, 0, ""},
+ {"SYS_RASCTL", Const, 1, ""},
+ {"SYS_RCTL_ADD_RULE", Const, 0, ""},
+ {"SYS_RCTL_GET_LIMITS", Const, 0, ""},
+ {"SYS_RCTL_GET_RACCT", Const, 0, ""},
+ {"SYS_RCTL_GET_RULES", Const, 0, ""},
+ {"SYS_RCTL_REMOVE_RULE", Const, 0, ""},
+ {"SYS_READ", Const, 0, ""},
+ {"SYS_READAHEAD", Const, 0, ""},
+ {"SYS_READDIR", Const, 0, ""},
+ {"SYS_READLINK", Const, 0, ""},
+ {"SYS_READLINKAT", Const, 0, ""},
+ {"SYS_READV", Const, 0, ""},
+ {"SYS_READV_NOCANCEL", Const, 0, ""},
+ {"SYS_READ_NOCANCEL", Const, 0, ""},
+ {"SYS_REBOOT", Const, 0, ""},
+ {"SYS_RECV", Const, 0, ""},
+ {"SYS_RECVFROM", Const, 0, ""},
+ {"SYS_RECVFROM_NOCANCEL", Const, 0, ""},
+ {"SYS_RECVMMSG", Const, 0, ""},
+ {"SYS_RECVMSG", Const, 0, ""},
+ {"SYS_RECVMSG_NOCANCEL", Const, 0, ""},
+ {"SYS_REMAP_FILE_PAGES", Const, 0, ""},
+ {"SYS_REMOVEXATTR", Const, 0, ""},
+ {"SYS_RENAME", Const, 0, ""},
+ {"SYS_RENAMEAT", Const, 0, ""},
+ {"SYS_REQUEST_KEY", Const, 0, ""},
+ {"SYS_RESTART_SYSCALL", Const, 0, ""},
+ {"SYS_REVOKE", Const, 0, ""},
+ {"SYS_RFORK", Const, 0, ""},
+ {"SYS_RMDIR", Const, 0, ""},
+ {"SYS_RTPRIO", Const, 0, ""},
+ {"SYS_RTPRIO_THREAD", Const, 0, ""},
+ {"SYS_RT_SIGACTION", Const, 0, ""},
+ {"SYS_RT_SIGPENDING", Const, 0, ""},
+ {"SYS_RT_SIGPROCMASK", Const, 0, ""},
+ {"SYS_RT_SIGQUEUEINFO", Const, 0, ""},
+ {"SYS_RT_SIGRETURN", Const, 0, ""},
+ {"SYS_RT_SIGSUSPEND", Const, 0, ""},
+ {"SYS_RT_SIGTIMEDWAIT", Const, 0, ""},
+ {"SYS_RT_TGSIGQUEUEINFO", Const, 0, ""},
+ {"SYS_SBRK", Const, 0, ""},
+ {"SYS_SCHED_GETAFFINITY", Const, 0, ""},
+ {"SYS_SCHED_GETPARAM", Const, 0, ""},
+ {"SYS_SCHED_GETSCHEDULER", Const, 0, ""},
+ {"SYS_SCHED_GET_PRIORITY_MAX", Const, 0, ""},
+ {"SYS_SCHED_GET_PRIORITY_MIN", Const, 0, ""},
+ {"SYS_SCHED_RR_GET_INTERVAL", Const, 0, ""},
+ {"SYS_SCHED_SETAFFINITY", Const, 0, ""},
+ {"SYS_SCHED_SETPARAM", Const, 0, ""},
+ {"SYS_SCHED_SETSCHEDULER", Const, 0, ""},
+ {"SYS_SCHED_YIELD", Const, 0, ""},
+ {"SYS_SCTP_GENERIC_RECVMSG", Const, 0, ""},
+ {"SYS_SCTP_GENERIC_SENDMSG", Const, 0, ""},
+ {"SYS_SCTP_GENERIC_SENDMSG_IOV", Const, 0, ""},
+ {"SYS_SCTP_PEELOFF", Const, 0, ""},
+ {"SYS_SEARCHFS", Const, 0, ""},
+ {"SYS_SECURITY", Const, 0, ""},
+ {"SYS_SELECT", Const, 0, ""},
+ {"SYS_SELECT_NOCANCEL", Const, 0, ""},
+ {"SYS_SEMCONFIG", Const, 1, ""},
+ {"SYS_SEMCTL", Const, 0, ""},
+ {"SYS_SEMGET", Const, 0, ""},
+ {"SYS_SEMOP", Const, 0, ""},
+ {"SYS_SEMSYS", Const, 0, ""},
+ {"SYS_SEMTIMEDOP", Const, 0, ""},
+ {"SYS_SEM_CLOSE", Const, 0, ""},
+ {"SYS_SEM_DESTROY", Const, 0, ""},
+ {"SYS_SEM_GETVALUE", Const, 0, ""},
+ {"SYS_SEM_INIT", Const, 0, ""},
+ {"SYS_SEM_OPEN", Const, 0, ""},
+ {"SYS_SEM_POST", Const, 0, ""},
+ {"SYS_SEM_TRYWAIT", Const, 0, ""},
+ {"SYS_SEM_UNLINK", Const, 0, ""},
+ {"SYS_SEM_WAIT", Const, 0, ""},
+ {"SYS_SEM_WAIT_NOCANCEL", Const, 0, ""},
+ {"SYS_SEND", Const, 0, ""},
+ {"SYS_SENDFILE", Const, 0, ""},
+ {"SYS_SENDFILE64", Const, 0, ""},
+ {"SYS_SENDMMSG", Const, 0, ""},
+ {"SYS_SENDMSG", Const, 0, ""},
+ {"SYS_SENDMSG_NOCANCEL", Const, 0, ""},
+ {"SYS_SENDTO", Const, 0, ""},
+ {"SYS_SENDTO_NOCANCEL", Const, 0, ""},
+ {"SYS_SETATTRLIST", Const, 0, ""},
+ {"SYS_SETAUDIT", Const, 0, ""},
+ {"SYS_SETAUDIT_ADDR", Const, 0, ""},
+ {"SYS_SETAUID", Const, 0, ""},
+ {"SYS_SETCONTEXT", Const, 0, ""},
+ {"SYS_SETDOMAINNAME", Const, 0, ""},
+ {"SYS_SETEGID", Const, 0, ""},
+ {"SYS_SETEUID", Const, 0, ""},
+ {"SYS_SETFIB", Const, 0, ""},
+ {"SYS_SETFSGID", Const, 0, ""},
+ {"SYS_SETFSGID32", Const, 0, ""},
+ {"SYS_SETFSUID", Const, 0, ""},
+ {"SYS_SETFSUID32", Const, 0, ""},
+ {"SYS_SETGID", Const, 0, ""},
+ {"SYS_SETGID32", Const, 0, ""},
+ {"SYS_SETGROUPS", Const, 0, ""},
+ {"SYS_SETGROUPS32", Const, 0, ""},
+ {"SYS_SETHOSTNAME", Const, 0, ""},
+ {"SYS_SETITIMER", Const, 0, ""},
+ {"SYS_SETLCID", Const, 0, ""},
+ {"SYS_SETLOGIN", Const, 0, ""},
+ {"SYS_SETLOGINCLASS", Const, 0, ""},
+ {"SYS_SETNS", Const, 0, ""},
+ {"SYS_SETPGID", Const, 0, ""},
+ {"SYS_SETPRIORITY", Const, 0, ""},
+ {"SYS_SETPRIVEXEC", Const, 0, ""},
+ {"SYS_SETREGID", Const, 0, ""},
+ {"SYS_SETREGID32", Const, 0, ""},
+ {"SYS_SETRESGID", Const, 0, ""},
+ {"SYS_SETRESGID32", Const, 0, ""},
+ {"SYS_SETRESUID", Const, 0, ""},
+ {"SYS_SETRESUID32", Const, 0, ""},
+ {"SYS_SETREUID", Const, 0, ""},
+ {"SYS_SETREUID32", Const, 0, ""},
+ {"SYS_SETRLIMIT", Const, 0, ""},
+ {"SYS_SETRTABLE", Const, 1, ""},
+ {"SYS_SETSGROUPS", Const, 0, ""},
+ {"SYS_SETSID", Const, 0, ""},
+ {"SYS_SETSOCKOPT", Const, 0, ""},
+ {"SYS_SETTID", Const, 0, ""},
+ {"SYS_SETTID_WITH_PID", Const, 0, ""},
+ {"SYS_SETTIMEOFDAY", Const, 0, ""},
+ {"SYS_SETUID", Const, 0, ""},
+ {"SYS_SETUID32", Const, 0, ""},
+ {"SYS_SETWGROUPS", Const, 0, ""},
+ {"SYS_SETXATTR", Const, 0, ""},
+ {"SYS_SET_MEMPOLICY", Const, 0, ""},
+ {"SYS_SET_ROBUST_LIST", Const, 0, ""},
+ {"SYS_SET_THREAD_AREA", Const, 0, ""},
+ {"SYS_SET_TID_ADDRESS", Const, 0, ""},
+ {"SYS_SGETMASK", Const, 0, ""},
+ {"SYS_SHARED_REGION_CHECK_NP", Const, 0, ""},
+ {"SYS_SHARED_REGION_MAP_AND_SLIDE_NP", Const, 0, ""},
+ {"SYS_SHMAT", Const, 0, ""},
+ {"SYS_SHMCTL", Const, 0, ""},
+ {"SYS_SHMDT", Const, 0, ""},
+ {"SYS_SHMGET", Const, 0, ""},
+ {"SYS_SHMSYS", Const, 0, ""},
+ {"SYS_SHM_OPEN", Const, 0, ""},
+ {"SYS_SHM_UNLINK", Const, 0, ""},
+ {"SYS_SHUTDOWN", Const, 0, ""},
+ {"SYS_SIGACTION", Const, 0, ""},
+ {"SYS_SIGALTSTACK", Const, 0, ""},
+ {"SYS_SIGNAL", Const, 0, ""},
+ {"SYS_SIGNALFD", Const, 0, ""},
+ {"SYS_SIGNALFD4", Const, 0, ""},
+ {"SYS_SIGPENDING", Const, 0, ""},
+ {"SYS_SIGPROCMASK", Const, 0, ""},
+ {"SYS_SIGQUEUE", Const, 0, ""},
+ {"SYS_SIGQUEUEINFO", Const, 1, ""},
+ {"SYS_SIGRETURN", Const, 0, ""},
+ {"SYS_SIGSUSPEND", Const, 0, ""},
+ {"SYS_SIGSUSPEND_NOCANCEL", Const, 0, ""},
+ {"SYS_SIGTIMEDWAIT", Const, 0, ""},
+ {"SYS_SIGWAIT", Const, 0, ""},
+ {"SYS_SIGWAITINFO", Const, 0, ""},
+ {"SYS_SOCKET", Const, 0, ""},
+ {"SYS_SOCKETCALL", Const, 0, ""},
+ {"SYS_SOCKETPAIR", Const, 0, ""},
+ {"SYS_SPLICE", Const, 0, ""},
+ {"SYS_SSETMASK", Const, 0, ""},
+ {"SYS_SSTK", Const, 0, ""},
+ {"SYS_STACK_SNAPSHOT", Const, 0, ""},
+ {"SYS_STAT", Const, 0, ""},
+ {"SYS_STAT64", Const, 0, ""},
+ {"SYS_STAT64_EXTENDED", Const, 0, ""},
+ {"SYS_STATFS", Const, 0, ""},
+ {"SYS_STATFS64", Const, 0, ""},
+ {"SYS_STATV", Const, 0, ""},
+ {"SYS_STATVFS1", Const, 1, ""},
+ {"SYS_STAT_EXTENDED", Const, 0, ""},
+ {"SYS_STIME", Const, 0, ""},
+ {"SYS_STTY", Const, 0, ""},
+ {"SYS_SWAPCONTEXT", Const, 0, ""},
+ {"SYS_SWAPCTL", Const, 1, ""},
+ {"SYS_SWAPOFF", Const, 0, ""},
+ {"SYS_SWAPON", Const, 0, ""},
+ {"SYS_SYMLINK", Const, 0, ""},
+ {"SYS_SYMLINKAT", Const, 0, ""},
+ {"SYS_SYNC", Const, 0, ""},
+ {"SYS_SYNCFS", Const, 0, ""},
+ {"SYS_SYNC_FILE_RANGE", Const, 0, ""},
+ {"SYS_SYSARCH", Const, 0, ""},
+ {"SYS_SYSCALL", Const, 0, ""},
+ {"SYS_SYSCALL_BASE", Const, 0, ""},
+ {"SYS_SYSFS", Const, 0, ""},
+ {"SYS_SYSINFO", Const, 0, ""},
+ {"SYS_SYSLOG", Const, 0, ""},
+ {"SYS_TEE", Const, 0, ""},
+ {"SYS_TGKILL", Const, 0, ""},
+ {"SYS_THREAD_SELFID", Const, 0, ""},
+ {"SYS_THR_CREATE", Const, 0, ""},
+ {"SYS_THR_EXIT", Const, 0, ""},
+ {"SYS_THR_KILL", Const, 0, ""},
+ {"SYS_THR_KILL2", Const, 0, ""},
+ {"SYS_THR_NEW", Const, 0, ""},
+ {"SYS_THR_SELF", Const, 0, ""},
+ {"SYS_THR_SET_NAME", Const, 0, ""},
+ {"SYS_THR_SUSPEND", Const, 0, ""},
+ {"SYS_THR_WAKE", Const, 0, ""},
+ {"SYS_TIME", Const, 0, ""},
+ {"SYS_TIMERFD_CREATE", Const, 0, ""},
+ {"SYS_TIMERFD_GETTIME", Const, 0, ""},
+ {"SYS_TIMERFD_SETTIME", Const, 0, ""},
+ {"SYS_TIMER_CREATE", Const, 0, ""},
+ {"SYS_TIMER_DELETE", Const, 0, ""},
+ {"SYS_TIMER_GETOVERRUN", Const, 0, ""},
+ {"SYS_TIMER_GETTIME", Const, 0, ""},
+ {"SYS_TIMER_SETTIME", Const, 0, ""},
+ {"SYS_TIMES", Const, 0, ""},
+ {"SYS_TKILL", Const, 0, ""},
+ {"SYS_TRUNCATE", Const, 0, ""},
+ {"SYS_TRUNCATE64", Const, 0, ""},
+ {"SYS_TUXCALL", Const, 0, ""},
+ {"SYS_UGETRLIMIT", Const, 0, ""},
+ {"SYS_ULIMIT", Const, 0, ""},
+ {"SYS_UMASK", Const, 0, ""},
+ {"SYS_UMASK_EXTENDED", Const, 0, ""},
+ {"SYS_UMOUNT", Const, 0, ""},
+ {"SYS_UMOUNT2", Const, 0, ""},
+ {"SYS_UNAME", Const, 0, ""},
+ {"SYS_UNDELETE", Const, 0, ""},
+ {"SYS_UNLINK", Const, 0, ""},
+ {"SYS_UNLINKAT", Const, 0, ""},
+ {"SYS_UNMOUNT", Const, 0, ""},
+ {"SYS_UNSHARE", Const, 0, ""},
+ {"SYS_USELIB", Const, 0, ""},
+ {"SYS_USTAT", Const, 0, ""},
+ {"SYS_UTIME", Const, 0, ""},
+ {"SYS_UTIMENSAT", Const, 0, ""},
+ {"SYS_UTIMES", Const, 0, ""},
+ {"SYS_UTRACE", Const, 0, ""},
+ {"SYS_UUIDGEN", Const, 0, ""},
+ {"SYS_VADVISE", Const, 1, ""},
+ {"SYS_VFORK", Const, 0, ""},
+ {"SYS_VHANGUP", Const, 0, ""},
+ {"SYS_VM86", Const, 0, ""},
+ {"SYS_VM86OLD", Const, 0, ""},
+ {"SYS_VMSPLICE", Const, 0, ""},
+ {"SYS_VM_PRESSURE_MONITOR", Const, 0, ""},
+ {"SYS_VSERVER", Const, 0, ""},
+ {"SYS_WAIT4", Const, 0, ""},
+ {"SYS_WAIT4_NOCANCEL", Const, 0, ""},
+ {"SYS_WAIT6", Const, 1, ""},
+ {"SYS_WAITEVENT", Const, 0, ""},
+ {"SYS_WAITID", Const, 0, ""},
+ {"SYS_WAITID_NOCANCEL", Const, 0, ""},
+ {"SYS_WAITPID", Const, 0, ""},
+ {"SYS_WATCHEVENT", Const, 0, ""},
+ {"SYS_WORKQ_KERNRETURN", Const, 0, ""},
+ {"SYS_WORKQ_OPEN", Const, 0, ""},
+ {"SYS_WRITE", Const, 0, ""},
+ {"SYS_WRITEV", Const, 0, ""},
+ {"SYS_WRITEV_NOCANCEL", Const, 0, ""},
+ {"SYS_WRITE_NOCANCEL", Const, 0, ""},
+ {"SYS_YIELD", Const, 0, ""},
+ {"SYS__LLSEEK", Const, 0, ""},
+ {"SYS__LWP_CONTINUE", Const, 1, ""},
+ {"SYS__LWP_CREATE", Const, 1, ""},
+ {"SYS__LWP_CTL", Const, 1, ""},
+ {"SYS__LWP_DETACH", Const, 1, ""},
+ {"SYS__LWP_EXIT", Const, 1, ""},
+ {"SYS__LWP_GETNAME", Const, 1, ""},
+ {"SYS__LWP_GETPRIVATE", Const, 1, ""},
+ {"SYS__LWP_KILL", Const, 1, ""},
+ {"SYS__LWP_PARK", Const, 1, ""},
+ {"SYS__LWP_SELF", Const, 1, ""},
+ {"SYS__LWP_SETNAME", Const, 1, ""},
+ {"SYS__LWP_SETPRIVATE", Const, 1, ""},
+ {"SYS__LWP_SUSPEND", Const, 1, ""},
+ {"SYS__LWP_UNPARK", Const, 1, ""},
+ {"SYS__LWP_UNPARK_ALL", Const, 1, ""},
+ {"SYS__LWP_WAIT", Const, 1, ""},
+ {"SYS__LWP_WAKEUP", Const, 1, ""},
+ {"SYS__NEWSELECT", Const, 0, ""},
+ {"SYS__PSET_BIND", Const, 1, ""},
+ {"SYS__SCHED_GETAFFINITY", Const, 1, ""},
+ {"SYS__SCHED_GETPARAM", Const, 1, ""},
+ {"SYS__SCHED_SETAFFINITY", Const, 1, ""},
+ {"SYS__SCHED_SETPARAM", Const, 1, ""},
+ {"SYS__SYSCTL", Const, 0, ""},
+ {"SYS__UMTX_LOCK", Const, 0, ""},
+ {"SYS__UMTX_OP", Const, 0, ""},
+ {"SYS__UMTX_UNLOCK", Const, 0, ""},
+ {"SYS___ACL_ACLCHECK_FD", Const, 0, ""},
+ {"SYS___ACL_ACLCHECK_FILE", Const, 0, ""},
+ {"SYS___ACL_ACLCHECK_LINK", Const, 0, ""},
+ {"SYS___ACL_DELETE_FD", Const, 0, ""},
+ {"SYS___ACL_DELETE_FILE", Const, 0, ""},
+ {"SYS___ACL_DELETE_LINK", Const, 0, ""},
+ {"SYS___ACL_GET_FD", Const, 0, ""},
+ {"SYS___ACL_GET_FILE", Const, 0, ""},
+ {"SYS___ACL_GET_LINK", Const, 0, ""},
+ {"SYS___ACL_SET_FD", Const, 0, ""},
+ {"SYS___ACL_SET_FILE", Const, 0, ""},
+ {"SYS___ACL_SET_LINK", Const, 0, ""},
+ {"SYS___CAP_RIGHTS_GET", Const, 14, ""},
+ {"SYS___CLONE", Const, 1, ""},
+ {"SYS___DISABLE_THREADSIGNAL", Const, 0, ""},
+ {"SYS___GETCWD", Const, 0, ""},
+ {"SYS___GETLOGIN", Const, 1, ""},
+ {"SYS___GET_TCB", Const, 1, ""},
+ {"SYS___MAC_EXECVE", Const, 0, ""},
+ {"SYS___MAC_GETFSSTAT", Const, 0, ""},
+ {"SYS___MAC_GET_FD", Const, 0, ""},
+ {"SYS___MAC_GET_FILE", Const, 0, ""},
+ {"SYS___MAC_GET_LCID", Const, 0, ""},
+ {"SYS___MAC_GET_LCTX", Const, 0, ""},
+ {"SYS___MAC_GET_LINK", Const, 0, ""},
+ {"SYS___MAC_GET_MOUNT", Const, 0, ""},
+ {"SYS___MAC_GET_PID", Const, 0, ""},
+ {"SYS___MAC_GET_PROC", Const, 0, ""},
+ {"SYS___MAC_MOUNT", Const, 0, ""},
+ {"SYS___MAC_SET_FD", Const, 0, ""},
+ {"SYS___MAC_SET_FILE", Const, 0, ""},
+ {"SYS___MAC_SET_LCTX", Const, 0, ""},
+ {"SYS___MAC_SET_LINK", Const, 0, ""},
+ {"SYS___MAC_SET_PROC", Const, 0, ""},
+ {"SYS___MAC_SYSCALL", Const, 0, ""},
+ {"SYS___OLD_SEMWAIT_SIGNAL", Const, 0, ""},
+ {"SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", Const, 0, ""},
+ {"SYS___POSIX_CHOWN", Const, 1, ""},
+ {"SYS___POSIX_FCHOWN", Const, 1, ""},
+ {"SYS___POSIX_LCHOWN", Const, 1, ""},
+ {"SYS___POSIX_RENAME", Const, 1, ""},
+ {"SYS___PTHREAD_CANCELED", Const, 0, ""},
+ {"SYS___PTHREAD_CHDIR", Const, 0, ""},
+ {"SYS___PTHREAD_FCHDIR", Const, 0, ""},
+ {"SYS___PTHREAD_KILL", Const, 0, ""},
+ {"SYS___PTHREAD_MARKCANCEL", Const, 0, ""},
+ {"SYS___PTHREAD_SIGMASK", Const, 0, ""},
+ {"SYS___QUOTACTL", Const, 1, ""},
+ {"SYS___SEMCTL", Const, 1, ""},
+ {"SYS___SEMWAIT_SIGNAL", Const, 0, ""},
+ {"SYS___SEMWAIT_SIGNAL_NOCANCEL", Const, 0, ""},
+ {"SYS___SETLOGIN", Const, 1, ""},
+ {"SYS___SETUGID", Const, 0, ""},
+ {"SYS___SET_TCB", Const, 1, ""},
+ {"SYS___SIGACTION_SIGTRAMP", Const, 1, ""},
+ {"SYS___SIGTIMEDWAIT", Const, 1, ""},
+ {"SYS___SIGWAIT", Const, 0, ""},
+ {"SYS___SIGWAIT_NOCANCEL", Const, 0, ""},
+ {"SYS___SYSCTL", Const, 0, ""},
+ {"SYS___TFORK", Const, 1, ""},
+ {"SYS___THREXIT", Const, 1, ""},
+ {"SYS___THRSIGDIVERT", Const, 1, ""},
+ {"SYS___THRSLEEP", Const, 1, ""},
+ {"SYS___THRWAKEUP", Const, 1, ""},
+ {"S_ARCH1", Const, 1, ""},
+ {"S_ARCH2", Const, 1, ""},
+ {"S_BLKSIZE", Const, 0, ""},
+ {"S_IEXEC", Const, 0, ""},
+ {"S_IFBLK", Const, 0, ""},
+ {"S_IFCHR", Const, 0, ""},
+ {"S_IFDIR", Const, 0, ""},
+ {"S_IFIFO", Const, 0, ""},
+ {"S_IFLNK", Const, 0, ""},
+ {"S_IFMT", Const, 0, ""},
+ {"S_IFREG", Const, 0, ""},
+ {"S_IFSOCK", Const, 0, ""},
+ {"S_IFWHT", Const, 0, ""},
+ {"S_IREAD", Const, 0, ""},
+ {"S_IRGRP", Const, 0, ""},
+ {"S_IROTH", Const, 0, ""},
+ {"S_IRUSR", Const, 0, ""},
+ {"S_IRWXG", Const, 0, ""},
+ {"S_IRWXO", Const, 0, ""},
+ {"S_IRWXU", Const, 0, ""},
+ {"S_ISGID", Const, 0, ""},
+ {"S_ISTXT", Const, 0, ""},
+ {"S_ISUID", Const, 0, ""},
+ {"S_ISVTX", Const, 0, ""},
+ {"S_IWGRP", Const, 0, ""},
+ {"S_IWOTH", Const, 0, ""},
+ {"S_IWRITE", Const, 0, ""},
+ {"S_IWUSR", Const, 0, ""},
+ {"S_IXGRP", Const, 0, ""},
+ {"S_IXOTH", Const, 0, ""},
+ {"S_IXUSR", Const, 0, ""},
+ {"S_LOGIN_SET", Const, 1, ""},
+ {"SecurityAttributes", Type, 0, ""},
+ {"SecurityAttributes.InheritHandle", Field, 0, ""},
+ {"SecurityAttributes.Length", Field, 0, ""},
+ {"SecurityAttributes.SecurityDescriptor", Field, 0, ""},
+ {"Seek", Func, 0, "func(fd int, offset int64, whence int) (off int64, err error)"},
+ {"Select", Func, 0, "func(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)"},
+ {"Sendfile", Func, 0, "func(outfd int, infd int, offset *int64, count int) (written int, err error)"},
+ {"Sendmsg", Func, 0, "func(fd int, p []byte, oob []byte, to Sockaddr, flags int) (err error)"},
+ {"SendmsgN", Func, 3, "func(fd int, p []byte, oob []byte, to Sockaddr, flags int) (n int, err error)"},
+ {"Sendto", Func, 0, "func(fd int, p []byte, flags int, to Sockaddr) (err error)"},
+ {"Servent", Type, 0, ""},
+ {"Servent.Aliases", Field, 0, ""},
+ {"Servent.Name", Field, 0, ""},
+ {"Servent.Port", Field, 0, ""},
+ {"Servent.Proto", Field, 0, ""},
+ {"SetBpf", Func, 0, ""},
+ {"SetBpfBuflen", Func, 0, ""},
+ {"SetBpfDatalink", Func, 0, ""},
+ {"SetBpfHeadercmpl", Func, 0, ""},
+ {"SetBpfImmediate", Func, 0, ""},
+ {"SetBpfInterface", Func, 0, ""},
+ {"SetBpfPromisc", Func, 0, ""},
+ {"SetBpfTimeout", Func, 0, ""},
+ {"SetCurrentDirectory", Func, 0, ""},
+ {"SetEndOfFile", Func, 0, ""},
+ {"SetEnvironmentVariable", Func, 0, ""},
+ {"SetFileAttributes", Func, 0, ""},
+ {"SetFileCompletionNotificationModes", Func, 2, ""},
+ {"SetFilePointer", Func, 0, ""},
+ {"SetFileTime", Func, 0, ""},
+ {"SetHandleInformation", Func, 0, ""},
+ {"SetKevent", Func, 0, ""},
+ {"SetLsfPromisc", Func, 0, "func(name string, m bool) error"},
+ {"SetNonblock", Func, 0, "func(fd int, nonblocking bool) (err error)"},
+ {"Setdomainname", Func, 0, "func(p []byte) (err error)"},
+ {"Setegid", Func, 0, "func(egid int) (err error)"},
+ {"Setenv", Func, 0, "func(key string, value string) error"},
+ {"Seteuid", Func, 0, "func(euid int) (err error)"},
+ {"Setfsgid", Func, 0, "func(gid int) (err error)"},
+ {"Setfsuid", Func, 0, "func(uid int) (err error)"},
+ {"Setgid", Func, 0, "func(gid int) (err error)"},
+ {"Setgroups", Func, 0, "func(gids []int) (err error)"},
+ {"Sethostname", Func, 0, "func(p []byte) (err error)"},
+ {"Setlogin", Func, 0, ""},
+ {"Setpgid", Func, 0, "func(pid int, pgid int) (err error)"},
+ {"Setpriority", Func, 0, "func(which int, who int, prio int) (err error)"},
+ {"Setprivexec", Func, 0, ""},
+ {"Setregid", Func, 0, "func(rgid int, egid int) (err error)"},
+ {"Setresgid", Func, 0, "func(rgid int, egid int, sgid int) (err error)"},
+ {"Setresuid", Func, 0, "func(ruid int, euid int, suid int) (err error)"},
+ {"Setreuid", Func, 0, "func(ruid int, euid int) (err error)"},
+ {"Setrlimit", Func, 0, "func(resource int, rlim *Rlimit) error"},
+ {"Setsid", Func, 0, "func() (pid int, err error)"},
+ {"Setsockopt", Func, 0, ""},
+ {"SetsockoptByte", Func, 0, "func(fd int, level int, opt int, value byte) (err error)"},
+ {"SetsockoptICMPv6Filter", Func, 2, "func(fd int, level int, opt int, filter *ICMPv6Filter) error"},
+ {"SetsockoptIPMreq", Func, 0, "func(fd int, level int, opt int, mreq *IPMreq) (err error)"},
+ {"SetsockoptIPMreqn", Func, 0, "func(fd int, level int, opt int, mreq *IPMreqn) (err error)"},
+ {"SetsockoptIPv6Mreq", Func, 0, "func(fd int, level int, opt int, mreq *IPv6Mreq) (err error)"},
+ {"SetsockoptInet4Addr", Func, 0, "func(fd int, level int, opt int, value [4]byte) (err error)"},
+ {"SetsockoptInt", Func, 0, "func(fd int, level int, opt int, value int) (err error)"},
+ {"SetsockoptLinger", Func, 0, "func(fd int, level int, opt int, l *Linger) (err error)"},
+ {"SetsockoptString", Func, 0, "func(fd int, level int, opt int, s string) (err error)"},
+ {"SetsockoptTimeval", Func, 0, "func(fd int, level int, opt int, tv *Timeval) (err error)"},
+ {"Settimeofday", Func, 0, "func(tv *Timeval) (err error)"},
+ {"Setuid", Func, 0, "func(uid int) (err error)"},
+ {"Setxattr", Func, 1, "func(path string, attr string, data []byte, flags int) (err error)"},
+ {"Shutdown", Func, 0, "func(fd int, how int) (err error)"},
+ {"SidTypeAlias", Const, 0, ""},
+ {"SidTypeComputer", Const, 0, ""},
+ {"SidTypeDeletedAccount", Const, 0, ""},
+ {"SidTypeDomain", Const, 0, ""},
+ {"SidTypeGroup", Const, 0, ""},
+ {"SidTypeInvalid", Const, 0, ""},
+ {"SidTypeLabel", Const, 0, ""},
+ {"SidTypeUnknown", Const, 0, ""},
+ {"SidTypeUser", Const, 0, ""},
+ {"SidTypeWellKnownGroup", Const, 0, ""},
+ {"Signal", Type, 0, ""},
+ {"SizeofBpfHdr", Const, 0, ""},
+ {"SizeofBpfInsn", Const, 0, ""},
+ {"SizeofBpfProgram", Const, 0, ""},
+ {"SizeofBpfStat", Const, 0, ""},
+ {"SizeofBpfVersion", Const, 0, ""},
+ {"SizeofBpfZbuf", Const, 0, ""},
+ {"SizeofBpfZbufHeader", Const, 0, ""},
+ {"SizeofCmsghdr", Const, 0, ""},
+ {"SizeofICMPv6Filter", Const, 2, ""},
+ {"SizeofIPMreq", Const, 0, ""},
+ {"SizeofIPMreqn", Const, 0, ""},
+ {"SizeofIPv6MTUInfo", Const, 2, ""},
+ {"SizeofIPv6Mreq", Const, 0, ""},
+ {"SizeofIfAddrmsg", Const, 0, ""},
+ {"SizeofIfAnnounceMsghdr", Const, 1, ""},
+ {"SizeofIfData", Const, 0, ""},
+ {"SizeofIfInfomsg", Const, 0, ""},
+ {"SizeofIfMsghdr", Const, 0, ""},
+ {"SizeofIfaMsghdr", Const, 0, ""},
+ {"SizeofIfmaMsghdr", Const, 0, ""},
+ {"SizeofIfmaMsghdr2", Const, 0, ""},
+ {"SizeofInet4Pktinfo", Const, 0, ""},
+ {"SizeofInet6Pktinfo", Const, 0, ""},
+ {"SizeofInotifyEvent", Const, 0, ""},
+ {"SizeofLinger", Const, 0, ""},
+ {"SizeofMsghdr", Const, 0, ""},
+ {"SizeofNlAttr", Const, 0, ""},
+ {"SizeofNlMsgerr", Const, 0, ""},
+ {"SizeofNlMsghdr", Const, 0, ""},
+ {"SizeofRtAttr", Const, 0, ""},
+ {"SizeofRtGenmsg", Const, 0, ""},
+ {"SizeofRtMetrics", Const, 0, ""},
+ {"SizeofRtMsg", Const, 0, ""},
+ {"SizeofRtMsghdr", Const, 0, ""},
+ {"SizeofRtNexthop", Const, 0, ""},
+ {"SizeofSockFilter", Const, 0, ""},
+ {"SizeofSockFprog", Const, 0, ""},
+ {"SizeofSockaddrAny", Const, 0, ""},
+ {"SizeofSockaddrDatalink", Const, 0, ""},
+ {"SizeofSockaddrInet4", Const, 0, ""},
+ {"SizeofSockaddrInet6", Const, 0, ""},
+ {"SizeofSockaddrLinklayer", Const, 0, ""},
+ {"SizeofSockaddrNetlink", Const, 0, ""},
+ {"SizeofSockaddrUnix", Const, 0, ""},
+ {"SizeofTCPInfo", Const, 1, ""},
+ {"SizeofUcred", Const, 0, ""},
+ {"SlicePtrFromStrings", Func, 1, "func(ss []string) ([]*byte, error)"},
+ {"SockFilter", Type, 0, ""},
+ {"SockFilter.Code", Field, 0, ""},
+ {"SockFilter.Jf", Field, 0, ""},
+ {"SockFilter.Jt", Field, 0, ""},
+ {"SockFilter.K", Field, 0, ""},
+ {"SockFprog", Type, 0, ""},
+ {"SockFprog.Filter", Field, 0, ""},
+ {"SockFprog.Len", Field, 0, ""},
+ {"SockFprog.Pad_cgo_0", Field, 0, ""},
+ {"Sockaddr", Type, 0, ""},
+ {"SockaddrDatalink", Type, 0, ""},
+ {"SockaddrDatalink.Alen", Field, 0, ""},
+ {"SockaddrDatalink.Data", Field, 0, ""},
+ {"SockaddrDatalink.Family", Field, 0, ""},
+ {"SockaddrDatalink.Index", Field, 0, ""},
+ {"SockaddrDatalink.Len", Field, 0, ""},
+ {"SockaddrDatalink.Nlen", Field, 0, ""},
+ {"SockaddrDatalink.Slen", Field, 0, ""},
+ {"SockaddrDatalink.Type", Field, 0, ""},
+ {"SockaddrGen", Type, 0, ""},
+ {"SockaddrInet4", Type, 0, ""},
+ {"SockaddrInet4.Addr", Field, 0, ""},
+ {"SockaddrInet4.Port", Field, 0, ""},
+ {"SockaddrInet6", Type, 0, ""},
+ {"SockaddrInet6.Addr", Field, 0, ""},
+ {"SockaddrInet6.Port", Field, 0, ""},
+ {"SockaddrInet6.ZoneId", Field, 0, ""},
+ {"SockaddrLinklayer", Type, 0, ""},
+ {"SockaddrLinklayer.Addr", Field, 0, ""},
+ {"SockaddrLinklayer.Halen", Field, 0, ""},
+ {"SockaddrLinklayer.Hatype", Field, 0, ""},
+ {"SockaddrLinklayer.Ifindex", Field, 0, ""},
+ {"SockaddrLinklayer.Pkttype", Field, 0, ""},
+ {"SockaddrLinklayer.Protocol", Field, 0, ""},
+ {"SockaddrNetlink", Type, 0, ""},
+ {"SockaddrNetlink.Family", Field, 0, ""},
+ {"SockaddrNetlink.Groups", Field, 0, ""},
+ {"SockaddrNetlink.Pad", Field, 0, ""},
+ {"SockaddrNetlink.Pid", Field, 0, ""},
+ {"SockaddrUnix", Type, 0, ""},
+ {"SockaddrUnix.Name", Field, 0, ""},
+ {"Socket", Func, 0, "func(domain int, typ int, proto int) (fd int, err error)"},
+ {"SocketControlMessage", Type, 0, ""},
+ {"SocketControlMessage.Data", Field, 0, ""},
+ {"SocketControlMessage.Header", Field, 0, ""},
+ {"SocketDisableIPv6", Var, 0, ""},
+ {"Socketpair", Func, 0, "func(domain int, typ int, proto int) (fd [2]int, err error)"},
+ {"Splice", Func, 0, "func(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)"},
+ {"StartProcess", Func, 0, "func(argv0 string, argv []string, attr *ProcAttr) (pid int, handle uintptr, err error)"},
+ {"StartupInfo", Type, 0, ""},
+ {"StartupInfo.Cb", Field, 0, ""},
+ {"StartupInfo.Desktop", Field, 0, ""},
+ {"StartupInfo.FillAttribute", Field, 0, ""},
+ {"StartupInfo.Flags", Field, 0, ""},
+ {"StartupInfo.ShowWindow", Field, 0, ""},
+ {"StartupInfo.StdErr", Field, 0, ""},
+ {"StartupInfo.StdInput", Field, 0, ""},
+ {"StartupInfo.StdOutput", Field, 0, ""},
+ {"StartupInfo.Title", Field, 0, ""},
+ {"StartupInfo.X", Field, 0, ""},
+ {"StartupInfo.XCountChars", Field, 0, ""},
+ {"StartupInfo.XSize", Field, 0, ""},
+ {"StartupInfo.Y", Field, 0, ""},
+ {"StartupInfo.YCountChars", Field, 0, ""},
+ {"StartupInfo.YSize", Field, 0, ""},
+ {"Stat", Func, 0, "func(path string, stat *Stat_t) (err error)"},
+ {"Stat_t", Type, 0, ""},
+ {"Stat_t.Atim", Field, 0, ""},
+ {"Stat_t.Atim_ext", Field, 12, ""},
+ {"Stat_t.Atimespec", Field, 0, ""},
+ {"Stat_t.Birthtimespec", Field, 0, ""},
+ {"Stat_t.Blksize", Field, 0, ""},
+ {"Stat_t.Blocks", Field, 0, ""},
+ {"Stat_t.Btim_ext", Field, 12, ""},
+ {"Stat_t.Ctim", Field, 0, ""},
+ {"Stat_t.Ctim_ext", Field, 12, ""},
+ {"Stat_t.Ctimespec", Field, 0, ""},
+ {"Stat_t.Dev", Field, 0, ""},
+ {"Stat_t.Flags", Field, 0, ""},
+ {"Stat_t.Gen", Field, 0, ""},
+ {"Stat_t.Gid", Field, 0, ""},
+ {"Stat_t.Ino", Field, 0, ""},
+ {"Stat_t.Lspare", Field, 0, ""},
+ {"Stat_t.Lspare0", Field, 2, ""},
+ {"Stat_t.Lspare1", Field, 2, ""},
+ {"Stat_t.Mode", Field, 0, ""},
+ {"Stat_t.Mtim", Field, 0, ""},
+ {"Stat_t.Mtim_ext", Field, 12, ""},
+ {"Stat_t.Mtimespec", Field, 0, ""},
+ {"Stat_t.Nlink", Field, 0, ""},
+ {"Stat_t.Pad_cgo_0", Field, 0, ""},
+ {"Stat_t.Pad_cgo_1", Field, 0, ""},
+ {"Stat_t.Pad_cgo_2", Field, 0, ""},
+ {"Stat_t.Padding0", Field, 12, ""},
+ {"Stat_t.Padding1", Field, 12, ""},
+ {"Stat_t.Qspare", Field, 0, ""},
+ {"Stat_t.Rdev", Field, 0, ""},
+ {"Stat_t.Size", Field, 0, ""},
+ {"Stat_t.Spare", Field, 2, ""},
+ {"Stat_t.Uid", Field, 0, ""},
+ {"Stat_t.X__pad0", Field, 0, ""},
+ {"Stat_t.X__pad1", Field, 0, ""},
+ {"Stat_t.X__pad2", Field, 0, ""},
+ {"Stat_t.X__st_birthtim", Field, 2, ""},
+ {"Stat_t.X__st_ino", Field, 0, ""},
+ {"Stat_t.X__unused", Field, 0, ""},
+ {"Statfs", Func, 0, "func(path string, buf *Statfs_t) (err error)"},
+ {"Statfs_t", Type, 0, ""},
+ {"Statfs_t.Asyncreads", Field, 0, ""},
+ {"Statfs_t.Asyncwrites", Field, 0, ""},
+ {"Statfs_t.Bavail", Field, 0, ""},
+ {"Statfs_t.Bfree", Field, 0, ""},
+ {"Statfs_t.Blocks", Field, 0, ""},
+ {"Statfs_t.Bsize", Field, 0, ""},
+ {"Statfs_t.Charspare", Field, 0, ""},
+ {"Statfs_t.F_asyncreads", Field, 2, ""},
+ {"Statfs_t.F_asyncwrites", Field, 2, ""},
+ {"Statfs_t.F_bavail", Field, 2, ""},
+ {"Statfs_t.F_bfree", Field, 2, ""},
+ {"Statfs_t.F_blocks", Field, 2, ""},
+ {"Statfs_t.F_bsize", Field, 2, ""},
+ {"Statfs_t.F_ctime", Field, 2, ""},
+ {"Statfs_t.F_favail", Field, 2, ""},
+ {"Statfs_t.F_ffree", Field, 2, ""},
+ {"Statfs_t.F_files", Field, 2, ""},
+ {"Statfs_t.F_flags", Field, 2, ""},
+ {"Statfs_t.F_fsid", Field, 2, ""},
+ {"Statfs_t.F_fstypename", Field, 2, ""},
+ {"Statfs_t.F_iosize", Field, 2, ""},
+ {"Statfs_t.F_mntfromname", Field, 2, ""},
+ {"Statfs_t.F_mntfromspec", Field, 3, ""},
+ {"Statfs_t.F_mntonname", Field, 2, ""},
+ {"Statfs_t.F_namemax", Field, 2, ""},
+ {"Statfs_t.F_owner", Field, 2, ""},
+ {"Statfs_t.F_spare", Field, 2, ""},
+ {"Statfs_t.F_syncreads", Field, 2, ""},
+ {"Statfs_t.F_syncwrites", Field, 2, ""},
+ {"Statfs_t.Ffree", Field, 0, ""},
+ {"Statfs_t.Files", Field, 0, ""},
+ {"Statfs_t.Flags", Field, 0, ""},
+ {"Statfs_t.Frsize", Field, 0, ""},
+ {"Statfs_t.Fsid", Field, 0, ""},
+ {"Statfs_t.Fssubtype", Field, 0, ""},
+ {"Statfs_t.Fstypename", Field, 0, ""},
+ {"Statfs_t.Iosize", Field, 0, ""},
+ {"Statfs_t.Mntfromname", Field, 0, ""},
+ {"Statfs_t.Mntonname", Field, 0, ""},
+ {"Statfs_t.Mount_info", Field, 2, ""},
+ {"Statfs_t.Namelen", Field, 0, ""},
+ {"Statfs_t.Namemax", Field, 0, ""},
+ {"Statfs_t.Owner", Field, 0, ""},
+ {"Statfs_t.Pad_cgo_0", Field, 0, ""},
+ {"Statfs_t.Pad_cgo_1", Field, 2, ""},
+ {"Statfs_t.Reserved", Field, 0, ""},
+ {"Statfs_t.Spare", Field, 0, ""},
+ {"Statfs_t.Syncreads", Field, 0, ""},
+ {"Statfs_t.Syncwrites", Field, 0, ""},
+ {"Statfs_t.Type", Field, 0, ""},
+ {"Statfs_t.Version", Field, 0, ""},
+ {"Stderr", Var, 0, ""},
+ {"Stdin", Var, 0, ""},
+ {"Stdout", Var, 0, ""},
+ {"StringBytePtr", Func, 0, "func(s string) *byte"},
+ {"StringByteSlice", Func, 0, "func(s string) []byte"},
+ {"StringSlicePtr", Func, 0, "func(ss []string) []*byte"},
+ {"StringToSid", Func, 0, ""},
+ {"StringToUTF16", Func, 0, ""},
+ {"StringToUTF16Ptr", Func, 0, ""},
+ {"Symlink", Func, 0, "func(oldpath string, newpath string) (err error)"},
+ {"Sync", Func, 0, "func()"},
+ {"SyncFileRange", Func, 0, "func(fd int, off int64, n int64, flags int) (err error)"},
+ {"SysProcAttr", Type, 0, ""},
+ {"SysProcAttr.AdditionalInheritedHandles", Field, 17, ""},
+ {"SysProcAttr.AmbientCaps", Field, 9, ""},
+ {"SysProcAttr.CgroupFD", Field, 20, ""},
+ {"SysProcAttr.Chroot", Field, 0, ""},
+ {"SysProcAttr.Cloneflags", Field, 2, ""},
+ {"SysProcAttr.CmdLine", Field, 0, ""},
+ {"SysProcAttr.CreationFlags", Field, 1, ""},
+ {"SysProcAttr.Credential", Field, 0, ""},
+ {"SysProcAttr.Ctty", Field, 1, ""},
+ {"SysProcAttr.Foreground", Field, 5, ""},
+ {"SysProcAttr.GidMappings", Field, 4, ""},
+ {"SysProcAttr.GidMappingsEnableSetgroups", Field, 5, ""},
+ {"SysProcAttr.HideWindow", Field, 0, ""},
+ {"SysProcAttr.Jail", Field, 21, ""},
+ {"SysProcAttr.NoInheritHandles", Field, 16, ""},
+ {"SysProcAttr.Noctty", Field, 0, ""},
+ {"SysProcAttr.ParentProcess", Field, 17, ""},
+ {"SysProcAttr.Pdeathsig", Field, 0, ""},
+ {"SysProcAttr.Pgid", Field, 5, ""},
+ {"SysProcAttr.PidFD", Field, 22, ""},
+ {"SysProcAttr.ProcessAttributes", Field, 13, ""},
+ {"SysProcAttr.Ptrace", Field, 0, ""},
+ {"SysProcAttr.Setctty", Field, 0, ""},
+ {"SysProcAttr.Setpgid", Field, 0, ""},
+ {"SysProcAttr.Setsid", Field, 0, ""},
+ {"SysProcAttr.ThreadAttributes", Field, 13, ""},
+ {"SysProcAttr.Token", Field, 10, ""},
+ {"SysProcAttr.UidMappings", Field, 4, ""},
+ {"SysProcAttr.Unshareflags", Field, 7, ""},
+ {"SysProcAttr.UseCgroupFD", Field, 20, ""},
+ {"SysProcIDMap", Type, 4, ""},
+ {"SysProcIDMap.ContainerID", Field, 4, ""},
+ {"SysProcIDMap.HostID", Field, 4, ""},
+ {"SysProcIDMap.Size", Field, 4, ""},
+ {"Syscall", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+ {"Syscall12", Func, 0, ""},
+ {"Syscall15", Func, 0, ""},
+ {"Syscall18", Func, 12, ""},
+ {"Syscall6", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+ {"Syscall9", Func, 0, ""},
+ {"SyscallN", Func, 18, ""},
+ {"Sysctl", Func, 0, ""},
+ {"SysctlUint32", Func, 0, ""},
+ {"Sysctlnode", Type, 2, ""},
+ {"Sysctlnode.Flags", Field, 2, ""},
+ {"Sysctlnode.Name", Field, 2, ""},
+ {"Sysctlnode.Num", Field, 2, ""},
+ {"Sysctlnode.Un", Field, 2, ""},
+ {"Sysctlnode.Ver", Field, 2, ""},
+ {"Sysctlnode.X__rsvd", Field, 2, ""},
+ {"Sysctlnode.X_sysctl_desc", Field, 2, ""},
+ {"Sysctlnode.X_sysctl_func", Field, 2, ""},
+ {"Sysctlnode.X_sysctl_parent", Field, 2, ""},
+ {"Sysctlnode.X_sysctl_size", Field, 2, ""},
+ {"Sysinfo", Func, 0, "func(info *Sysinfo_t) (err error)"},
+ {"Sysinfo_t", Type, 0, ""},
+ {"Sysinfo_t.Bufferram", Field, 0, ""},
+ {"Sysinfo_t.Freehigh", Field, 0, ""},
+ {"Sysinfo_t.Freeram", Field, 0, ""},
+ {"Sysinfo_t.Freeswap", Field, 0, ""},
+ {"Sysinfo_t.Loads", Field, 0, ""},
+ {"Sysinfo_t.Pad", Field, 0, ""},
+ {"Sysinfo_t.Pad_cgo_0", Field, 0, ""},
+ {"Sysinfo_t.Pad_cgo_1", Field, 0, ""},
+ {"Sysinfo_t.Procs", Field, 0, ""},
+ {"Sysinfo_t.Sharedram", Field, 0, ""},
+ {"Sysinfo_t.Totalhigh", Field, 0, ""},
+ {"Sysinfo_t.Totalram", Field, 0, ""},
+ {"Sysinfo_t.Totalswap", Field, 0, ""},
+ {"Sysinfo_t.Unit", Field, 0, ""},
+ {"Sysinfo_t.Uptime", Field, 0, ""},
+ {"Sysinfo_t.X_f", Field, 0, ""},
+ {"Systemtime", Type, 0, ""},
+ {"Systemtime.Day", Field, 0, ""},
+ {"Systemtime.DayOfWeek", Field, 0, ""},
+ {"Systemtime.Hour", Field, 0, ""},
+ {"Systemtime.Milliseconds", Field, 0, ""},
+ {"Systemtime.Minute", Field, 0, ""},
+ {"Systemtime.Month", Field, 0, ""},
+ {"Systemtime.Second", Field, 0, ""},
+ {"Systemtime.Year", Field, 0, ""},
+ {"TCGETS", Const, 0, ""},
+ {"TCIFLUSH", Const, 1, ""},
+ {"TCIOFLUSH", Const, 1, ""},
+ {"TCOFLUSH", Const, 1, ""},
+ {"TCPInfo", Type, 1, ""},
+ {"TCPInfo.Advmss", Field, 1, ""},
+ {"TCPInfo.Ato", Field, 1, ""},
+ {"TCPInfo.Backoff", Field, 1, ""},
+ {"TCPInfo.Ca_state", Field, 1, ""},
+ {"TCPInfo.Fackets", Field, 1, ""},
+ {"TCPInfo.Last_ack_recv", Field, 1, ""},
+ {"TCPInfo.Last_ack_sent", Field, 1, ""},
+ {"TCPInfo.Last_data_recv", Field, 1, ""},
+ {"TCPInfo.Last_data_sent", Field, 1, ""},
+ {"TCPInfo.Lost", Field, 1, ""},
+ {"TCPInfo.Options", Field, 1, ""},
+ {"TCPInfo.Pad_cgo_0", Field, 1, ""},
+ {"TCPInfo.Pmtu", Field, 1, ""},
+ {"TCPInfo.Probes", Field, 1, ""},
+ {"TCPInfo.Rcv_mss", Field, 1, ""},
+ {"TCPInfo.Rcv_rtt", Field, 1, ""},
+ {"TCPInfo.Rcv_space", Field, 1, ""},
+ {"TCPInfo.Rcv_ssthresh", Field, 1, ""},
+ {"TCPInfo.Reordering", Field, 1, ""},
+ {"TCPInfo.Retrans", Field, 1, ""},
+ {"TCPInfo.Retransmits", Field, 1, ""},
+ {"TCPInfo.Rto", Field, 1, ""},
+ {"TCPInfo.Rtt", Field, 1, ""},
+ {"TCPInfo.Rttvar", Field, 1, ""},
+ {"TCPInfo.Sacked", Field, 1, ""},
+ {"TCPInfo.Snd_cwnd", Field, 1, ""},
+ {"TCPInfo.Snd_mss", Field, 1, ""},
+ {"TCPInfo.Snd_ssthresh", Field, 1, ""},
+ {"TCPInfo.State", Field, 1, ""},
+ {"TCPInfo.Total_retrans", Field, 1, ""},
+ {"TCPInfo.Unacked", Field, 1, ""},
+ {"TCPKeepalive", Type, 3, ""},
+ {"TCPKeepalive.Interval", Field, 3, ""},
+ {"TCPKeepalive.OnOff", Field, 3, ""},
+ {"TCPKeepalive.Time", Field, 3, ""},
+ {"TCP_CA_NAME_MAX", Const, 0, ""},
+ {"TCP_CONGCTL", Const, 1, ""},
+ {"TCP_CONGESTION", Const, 0, ""},
+ {"TCP_CONNECTIONTIMEOUT", Const, 0, ""},
+ {"TCP_CORK", Const, 0, ""},
+ {"TCP_DEFER_ACCEPT", Const, 0, ""},
+ {"TCP_ENABLE_ECN", Const, 16, ""},
+ {"TCP_INFO", Const, 0, ""},
+ {"TCP_KEEPALIVE", Const, 0, ""},
+ {"TCP_KEEPCNT", Const, 0, ""},
+ {"TCP_KEEPIDLE", Const, 0, ""},
+ {"TCP_KEEPINIT", Const, 1, ""},
+ {"TCP_KEEPINTVL", Const, 0, ""},
+ {"TCP_LINGER2", Const, 0, ""},
+ {"TCP_MAXBURST", Const, 0, ""},
+ {"TCP_MAXHLEN", Const, 0, ""},
+ {"TCP_MAXOLEN", Const, 0, ""},
+ {"TCP_MAXSEG", Const, 0, ""},
+ {"TCP_MAXWIN", Const, 0, ""},
+ {"TCP_MAX_SACK", Const, 0, ""},
+ {"TCP_MAX_WINSHIFT", Const, 0, ""},
+ {"TCP_MD5SIG", Const, 0, ""},
+ {"TCP_MD5SIG_MAXKEYLEN", Const, 0, ""},
+ {"TCP_MINMSS", Const, 0, ""},
+ {"TCP_MINMSSOVERLOAD", Const, 0, ""},
+ {"TCP_MSS", Const, 0, ""},
+ {"TCP_NODELAY", Const, 0, ""},
+ {"TCP_NOOPT", Const, 0, ""},
+ {"TCP_NOPUSH", Const, 0, ""},
+ {"TCP_NOTSENT_LOWAT", Const, 16, ""},
+ {"TCP_NSTATES", Const, 1, ""},
+ {"TCP_QUICKACK", Const, 0, ""},
+ {"TCP_RXT_CONNDROPTIME", Const, 0, ""},
+ {"TCP_RXT_FINDROP", Const, 0, ""},
+ {"TCP_SACK_ENABLE", Const, 1, ""},
+ {"TCP_SENDMOREACKS", Const, 16, ""},
+ {"TCP_SYNCNT", Const, 0, ""},
+ {"TCP_VENDOR", Const, 3, ""},
+ {"TCP_WINDOW_CLAMP", Const, 0, ""},
+ {"TCSAFLUSH", Const, 1, ""},
+ {"TCSETS", Const, 0, ""},
+ {"TF_DISCONNECT", Const, 0, ""},
+ {"TF_REUSE_SOCKET", Const, 0, ""},
+ {"TF_USE_DEFAULT_WORKER", Const, 0, ""},
+ {"TF_USE_KERNEL_APC", Const, 0, ""},
+ {"TF_USE_SYSTEM_THREAD", Const, 0, ""},
+ {"TF_WRITE_BEHIND", Const, 0, ""},
+ {"TH32CS_INHERIT", Const, 4, ""},
+ {"TH32CS_SNAPALL", Const, 4, ""},
+ {"TH32CS_SNAPHEAPLIST", Const, 4, ""},
+ {"TH32CS_SNAPMODULE", Const, 4, ""},
+ {"TH32CS_SNAPMODULE32", Const, 4, ""},
+ {"TH32CS_SNAPPROCESS", Const, 4, ""},
+ {"TH32CS_SNAPTHREAD", Const, 4, ""},
+ {"TIME_ZONE_ID_DAYLIGHT", Const, 0, ""},
+ {"TIME_ZONE_ID_STANDARD", Const, 0, ""},
+ {"TIME_ZONE_ID_UNKNOWN", Const, 0, ""},
+ {"TIOCCBRK", Const, 0, ""},
+ {"TIOCCDTR", Const, 0, ""},
+ {"TIOCCONS", Const, 0, ""},
+ {"TIOCDCDTIMESTAMP", Const, 0, ""},
+ {"TIOCDRAIN", Const, 0, ""},
+ {"TIOCDSIMICROCODE", Const, 0, ""},
+ {"TIOCEXCL", Const, 0, ""},
+ {"TIOCEXT", Const, 0, ""},
+ {"TIOCFLAG_CDTRCTS", Const, 1, ""},
+ {"TIOCFLAG_CLOCAL", Const, 1, ""},
+ {"TIOCFLAG_CRTSCTS", Const, 1, ""},
+ {"TIOCFLAG_MDMBUF", Const, 1, ""},
+ {"TIOCFLAG_PPS", Const, 1, ""},
+ {"TIOCFLAG_SOFTCAR", Const, 1, ""},
+ {"TIOCFLUSH", Const, 0, ""},
+ {"TIOCGDEV", Const, 0, ""},
+ {"TIOCGDRAINWAIT", Const, 0, ""},
+ {"TIOCGETA", Const, 0, ""},
+ {"TIOCGETD", Const, 0, ""},
+ {"TIOCGFLAGS", Const, 1, ""},
+ {"TIOCGICOUNT", Const, 0, ""},
+ {"TIOCGLCKTRMIOS", Const, 0, ""},
+ {"TIOCGLINED", Const, 1, ""},
+ {"TIOCGPGRP", Const, 0, ""},
+ {"TIOCGPTN", Const, 0, ""},
+ {"TIOCGQSIZE", Const, 1, ""},
+ {"TIOCGRANTPT", Const, 1, ""},
+ {"TIOCGRS485", Const, 0, ""},
+ {"TIOCGSERIAL", Const, 0, ""},
+ {"TIOCGSID", Const, 0, ""},
+ {"TIOCGSIZE", Const, 1, ""},
+ {"TIOCGSOFTCAR", Const, 0, ""},
+ {"TIOCGTSTAMP", Const, 1, ""},
+ {"TIOCGWINSZ", Const, 0, ""},
+ {"TIOCINQ", Const, 0, ""},
+ {"TIOCIXOFF", Const, 0, ""},
+ {"TIOCIXON", Const, 0, ""},
+ {"TIOCLINUX", Const, 0, ""},
+ {"TIOCMBIC", Const, 0, ""},
+ {"TIOCMBIS", Const, 0, ""},
+ {"TIOCMGDTRWAIT", Const, 0, ""},
+ {"TIOCMGET", Const, 0, ""},
+ {"TIOCMIWAIT", Const, 0, ""},
+ {"TIOCMODG", Const, 0, ""},
+ {"TIOCMODS", Const, 0, ""},
+ {"TIOCMSDTRWAIT", Const, 0, ""},
+ {"TIOCMSET", Const, 0, ""},
+ {"TIOCM_CAR", Const, 0, ""},
+ {"TIOCM_CD", Const, 0, ""},
+ {"TIOCM_CTS", Const, 0, ""},
+ {"TIOCM_DCD", Const, 0, ""},
+ {"TIOCM_DSR", Const, 0, ""},
+ {"TIOCM_DTR", Const, 0, ""},
+ {"TIOCM_LE", Const, 0, ""},
+ {"TIOCM_RI", Const, 0, ""},
+ {"TIOCM_RNG", Const, 0, ""},
+ {"TIOCM_RTS", Const, 0, ""},
+ {"TIOCM_SR", Const, 0, ""},
+ {"TIOCM_ST", Const, 0, ""},
+ {"TIOCNOTTY", Const, 0, ""},
+ {"TIOCNXCL", Const, 0, ""},
+ {"TIOCOUTQ", Const, 0, ""},
+ {"TIOCPKT", Const, 0, ""},
+ {"TIOCPKT_DATA", Const, 0, ""},
+ {"TIOCPKT_DOSTOP", Const, 0, ""},
+ {"TIOCPKT_FLUSHREAD", Const, 0, ""},
+ {"TIOCPKT_FLUSHWRITE", Const, 0, ""},
+ {"TIOCPKT_IOCTL", Const, 0, ""},
+ {"TIOCPKT_NOSTOP", Const, 0, ""},
+ {"TIOCPKT_START", Const, 0, ""},
+ {"TIOCPKT_STOP", Const, 0, ""},
+ {"TIOCPTMASTER", Const, 0, ""},
+ {"TIOCPTMGET", Const, 1, ""},
+ {"TIOCPTSNAME", Const, 1, ""},
+ {"TIOCPTYGNAME", Const, 0, ""},
+ {"TIOCPTYGRANT", Const, 0, ""},
+ {"TIOCPTYUNLK", Const, 0, ""},
+ {"TIOCRCVFRAME", Const, 1, ""},
+ {"TIOCREMOTE", Const, 0, ""},
+ {"TIOCSBRK", Const, 0, ""},
+ {"TIOCSCONS", Const, 0, ""},
+ {"TIOCSCTTY", Const, 0, ""},
+ {"TIOCSDRAINWAIT", Const, 0, ""},
+ {"TIOCSDTR", Const, 0, ""},
+ {"TIOCSERCONFIG", Const, 0, ""},
+ {"TIOCSERGETLSR", Const, 0, ""},
+ {"TIOCSERGETMULTI", Const, 0, ""},
+ {"TIOCSERGSTRUCT", Const, 0, ""},
+ {"TIOCSERGWILD", Const, 0, ""},
+ {"TIOCSERSETMULTI", Const, 0, ""},
+ {"TIOCSERSWILD", Const, 0, ""},
+ {"TIOCSER_TEMT", Const, 0, ""},
+ {"TIOCSETA", Const, 0, ""},
+ {"TIOCSETAF", Const, 0, ""},
+ {"TIOCSETAW", Const, 0, ""},
+ {"TIOCSETD", Const, 0, ""},
+ {"TIOCSFLAGS", Const, 1, ""},
+ {"TIOCSIG", Const, 0, ""},
+ {"TIOCSLCKTRMIOS", Const, 0, ""},
+ {"TIOCSLINED", Const, 1, ""},
+ {"TIOCSPGRP", Const, 0, ""},
+ {"TIOCSPTLCK", Const, 0, ""},
+ {"TIOCSQSIZE", Const, 1, ""},
+ {"TIOCSRS485", Const, 0, ""},
+ {"TIOCSSERIAL", Const, 0, ""},
+ {"TIOCSSIZE", Const, 1, ""},
+ {"TIOCSSOFTCAR", Const, 0, ""},
+ {"TIOCSTART", Const, 0, ""},
+ {"TIOCSTAT", Const, 0, ""},
+ {"TIOCSTI", Const, 0, ""},
+ {"TIOCSTOP", Const, 0, ""},
+ {"TIOCSTSTAMP", Const, 1, ""},
+ {"TIOCSWINSZ", Const, 0, ""},
+ {"TIOCTIMESTAMP", Const, 0, ""},
+ {"TIOCUCNTL", Const, 0, ""},
+ {"TIOCVHANGUP", Const, 0, ""},
+ {"TIOCXMTFRAME", Const, 1, ""},
+ {"TOKEN_ADJUST_DEFAULT", Const, 0, ""},
+ {"TOKEN_ADJUST_GROUPS", Const, 0, ""},
+ {"TOKEN_ADJUST_PRIVILEGES", Const, 0, ""},
+ {"TOKEN_ADJUST_SESSIONID", Const, 11, ""},
+ {"TOKEN_ALL_ACCESS", Const, 0, ""},
+ {"TOKEN_ASSIGN_PRIMARY", Const, 0, ""},
+ {"TOKEN_DUPLICATE", Const, 0, ""},
+ {"TOKEN_EXECUTE", Const, 0, ""},
+ {"TOKEN_IMPERSONATE", Const, 0, ""},
+ {"TOKEN_QUERY", Const, 0, ""},
+ {"TOKEN_QUERY_SOURCE", Const, 0, ""},
+ {"TOKEN_READ", Const, 0, ""},
+ {"TOKEN_WRITE", Const, 0, ""},
+ {"TOSTOP", Const, 0, ""},
+ {"TRUNCATE_EXISTING", Const, 0, ""},
+ {"TUNATTACHFILTER", Const, 0, ""},
+ {"TUNDETACHFILTER", Const, 0, ""},
+ {"TUNGETFEATURES", Const, 0, ""},
+ {"TUNGETIFF", Const, 0, ""},
+ {"TUNGETSNDBUF", Const, 0, ""},
+ {"TUNGETVNETHDRSZ", Const, 0, ""},
+ {"TUNSETDEBUG", Const, 0, ""},
+ {"TUNSETGROUP", Const, 0, ""},
+ {"TUNSETIFF", Const, 0, ""},
+ {"TUNSETLINK", Const, 0, ""},
+ {"TUNSETNOCSUM", Const, 0, ""},
+ {"TUNSETOFFLOAD", Const, 0, ""},
+ {"TUNSETOWNER", Const, 0, ""},
+ {"TUNSETPERSIST", Const, 0, ""},
+ {"TUNSETSNDBUF", Const, 0, ""},
+ {"TUNSETTXFILTER", Const, 0, ""},
+ {"TUNSETVNETHDRSZ", Const, 0, ""},
+ {"Tee", Func, 0, "func(rfd int, wfd int, len int, flags int) (n int64, err error)"},
+ {"TerminateProcess", Func, 0, ""},
+ {"Termios", Type, 0, ""},
+ {"Termios.Cc", Field, 0, ""},
+ {"Termios.Cflag", Field, 0, ""},
+ {"Termios.Iflag", Field, 0, ""},
+ {"Termios.Ispeed", Field, 0, ""},
+ {"Termios.Lflag", Field, 0, ""},
+ {"Termios.Line", Field, 0, ""},
+ {"Termios.Oflag", Field, 0, ""},
+ {"Termios.Ospeed", Field, 0, ""},
+ {"Termios.Pad_cgo_0", Field, 0, ""},
+ {"Tgkill", Func, 0, "func(tgid int, tid int, sig Signal) (err error)"},
+ {"Time", Func, 0, "func(t *Time_t) (tt Time_t, err error)"},
+ {"Time_t", Type, 0, ""},
+ {"Times", Func, 0, "func(tms *Tms) (ticks uintptr, err error)"},
+ {"Timespec", Type, 0, ""},
+ {"Timespec.Nsec", Field, 0, ""},
+ {"Timespec.Pad_cgo_0", Field, 2, ""},
+ {"Timespec.Sec", Field, 0, ""},
+ {"TimespecToNsec", Func, 0, "func(ts Timespec) int64"},
+ {"Timeval", Type, 0, ""},
+ {"Timeval.Pad_cgo_0", Field, 0, ""},
+ {"Timeval.Sec", Field, 0, ""},
+ {"Timeval.Usec", Field, 0, ""},
+ {"Timeval32", Type, 0, ""},
+ {"Timeval32.Sec", Field, 0, ""},
+ {"Timeval32.Usec", Field, 0, ""},
+ {"TimevalToNsec", Func, 0, "func(tv Timeval) int64"},
+ {"Timex", Type, 0, ""},
+ {"Timex.Calcnt", Field, 0, ""},
+ {"Timex.Constant", Field, 0, ""},
+ {"Timex.Errcnt", Field, 0, ""},
+ {"Timex.Esterror", Field, 0, ""},
+ {"Timex.Freq", Field, 0, ""},
+ {"Timex.Jitcnt", Field, 0, ""},
+ {"Timex.Jitter", Field, 0, ""},
+ {"Timex.Maxerror", Field, 0, ""},
+ {"Timex.Modes", Field, 0, ""},
+ {"Timex.Offset", Field, 0, ""},
+ {"Timex.Pad_cgo_0", Field, 0, ""},
+ {"Timex.Pad_cgo_1", Field, 0, ""},
+ {"Timex.Pad_cgo_2", Field, 0, ""},
+ {"Timex.Pad_cgo_3", Field, 0, ""},
+ {"Timex.Ppsfreq", Field, 0, ""},
+ {"Timex.Precision", Field, 0, ""},
+ {"Timex.Shift", Field, 0, ""},
+ {"Timex.Stabil", Field, 0, ""},
+ {"Timex.Status", Field, 0, ""},
+ {"Timex.Stbcnt", Field, 0, ""},
+ {"Timex.Tai", Field, 0, ""},
+ {"Timex.Tick", Field, 0, ""},
+ {"Timex.Time", Field, 0, ""},
+ {"Timex.Tolerance", Field, 0, ""},
+ {"Timezoneinformation", Type, 0, ""},
+ {"Timezoneinformation.Bias", Field, 0, ""},
+ {"Timezoneinformation.DaylightBias", Field, 0, ""},
+ {"Timezoneinformation.DaylightDate", Field, 0, ""},
+ {"Timezoneinformation.DaylightName", Field, 0, ""},
+ {"Timezoneinformation.StandardBias", Field, 0, ""},
+ {"Timezoneinformation.StandardDate", Field, 0, ""},
+ {"Timezoneinformation.StandardName", Field, 0, ""},
+ {"Tms", Type, 0, ""},
+ {"Tms.Cstime", Field, 0, ""},
+ {"Tms.Cutime", Field, 0, ""},
+ {"Tms.Stime", Field, 0, ""},
+ {"Tms.Utime", Field, 0, ""},
+ {"Token", Type, 0, ""},
+ {"TokenAccessInformation", Const, 0, ""},
+ {"TokenAuditPolicy", Const, 0, ""},
+ {"TokenDefaultDacl", Const, 0, ""},
+ {"TokenElevation", Const, 0, ""},
+ {"TokenElevationType", Const, 0, ""},
+ {"TokenGroups", Const, 0, ""},
+ {"TokenGroupsAndPrivileges", Const, 0, ""},
+ {"TokenHasRestrictions", Const, 0, ""},
+ {"TokenImpersonationLevel", Const, 0, ""},
+ {"TokenIntegrityLevel", Const, 0, ""},
+ {"TokenLinkedToken", Const, 0, ""},
+ {"TokenLogonSid", Const, 0, ""},
+ {"TokenMandatoryPolicy", Const, 0, ""},
+ {"TokenOrigin", Const, 0, ""},
+ {"TokenOwner", Const, 0, ""},
+ {"TokenPrimaryGroup", Const, 0, ""},
+ {"TokenPrivileges", Const, 0, ""},
+ {"TokenRestrictedSids", Const, 0, ""},
+ {"TokenSandBoxInert", Const, 0, ""},
+ {"TokenSessionId", Const, 0, ""},
+ {"TokenSessionReference", Const, 0, ""},
+ {"TokenSource", Const, 0, ""},
+ {"TokenStatistics", Const, 0, ""},
+ {"TokenType", Const, 0, ""},
+ {"TokenUIAccess", Const, 0, ""},
+ {"TokenUser", Const, 0, ""},
+ {"TokenVirtualizationAllowed", Const, 0, ""},
+ {"TokenVirtualizationEnabled", Const, 0, ""},
+ {"Tokenprimarygroup", Type, 0, ""},
+ {"Tokenprimarygroup.PrimaryGroup", Field, 0, ""},
+ {"Tokenuser", Type, 0, ""},
+ {"Tokenuser.User", Field, 0, ""},
+ {"TranslateAccountName", Func, 0, ""},
+ {"TranslateName", Func, 0, ""},
+ {"TransmitFile", Func, 0, ""},
+ {"TransmitFileBuffers", Type, 0, ""},
+ {"TransmitFileBuffers.Head", Field, 0, ""},
+ {"TransmitFileBuffers.HeadLength", Field, 0, ""},
+ {"TransmitFileBuffers.Tail", Field, 0, ""},
+ {"TransmitFileBuffers.TailLength", Field, 0, ""},
+ {"Truncate", Func, 0, "func(path string, length int64) (err error)"},
+ {"UNIX_PATH_MAX", Const, 12, ""},
+ {"USAGE_MATCH_TYPE_AND", Const, 0, ""},
+ {"USAGE_MATCH_TYPE_OR", Const, 0, ""},
+ {"UTF16FromString", Func, 1, ""},
+ {"UTF16PtrFromString", Func, 1, ""},
+ {"UTF16ToString", Func, 0, ""},
+ {"Ucred", Type, 0, ""},
+ {"Ucred.Gid", Field, 0, ""},
+ {"Ucred.Pid", Field, 0, ""},
+ {"Ucred.Uid", Field, 0, ""},
+ {"Umask", Func, 0, "func(mask int) (oldmask int)"},
+ {"Uname", Func, 0, "func(buf *Utsname) (err error)"},
+ {"Undelete", Func, 0, ""},
+ {"UnixCredentials", Func, 0, "func(ucred *Ucred) []byte"},
+ {"UnixRights", Func, 0, "func(fds ...int) []byte"},
+ {"Unlink", Func, 0, "func(path string) error"},
+ {"Unlinkat", Func, 0, "func(dirfd int, path string) error"},
+ {"UnmapViewOfFile", Func, 0, ""},
+ {"Unmount", Func, 0, "func(target string, flags int) (err error)"},
+ {"Unsetenv", Func, 4, "func(key string) error"},
+ {"Unshare", Func, 0, "func(flags int) (err error)"},
+ {"UserInfo10", Type, 0, ""},
+ {"UserInfo10.Comment", Field, 0, ""},
+ {"UserInfo10.FullName", Field, 0, ""},
+ {"UserInfo10.Name", Field, 0, ""},
+ {"UserInfo10.UsrComment", Field, 0, ""},
+ {"Ustat", Func, 0, "func(dev int, ubuf *Ustat_t) (err error)"},
+ {"Ustat_t", Type, 0, ""},
+ {"Ustat_t.Fname", Field, 0, ""},
+ {"Ustat_t.Fpack", Field, 0, ""},
+ {"Ustat_t.Pad_cgo_0", Field, 0, ""},
+ {"Ustat_t.Pad_cgo_1", Field, 0, ""},
+ {"Ustat_t.Tfree", Field, 0, ""},
+ {"Ustat_t.Tinode", Field, 0, ""},
+ {"Utimbuf", Type, 0, ""},
+ {"Utimbuf.Actime", Field, 0, ""},
+ {"Utimbuf.Modtime", Field, 0, ""},
+ {"Utime", Func, 0, "func(path string, buf *Utimbuf) (err error)"},
+ {"Utimes", Func, 0, "func(path string, tv []Timeval) (err error)"},
+ {"UtimesNano", Func, 1, "func(path string, ts []Timespec) (err error)"},
+ {"Utsname", Type, 0, ""},
+ {"Utsname.Domainname", Field, 0, ""},
+ {"Utsname.Machine", Field, 0, ""},
+ {"Utsname.Nodename", Field, 0, ""},
+ {"Utsname.Release", Field, 0, ""},
+ {"Utsname.Sysname", Field, 0, ""},
+ {"Utsname.Version", Field, 0, ""},
+ {"VDISCARD", Const, 0, ""},
+ {"VDSUSP", Const, 1, ""},
+ {"VEOF", Const, 0, ""},
+ {"VEOL", Const, 0, ""},
+ {"VEOL2", Const, 0, ""},
+ {"VERASE", Const, 0, ""},
+ {"VERASE2", Const, 1, ""},
+ {"VINTR", Const, 0, ""},
+ {"VKILL", Const, 0, ""},
+ {"VLNEXT", Const, 0, ""},
+ {"VMIN", Const, 0, ""},
+ {"VQUIT", Const, 0, ""},
+ {"VREPRINT", Const, 0, ""},
+ {"VSTART", Const, 0, ""},
+ {"VSTATUS", Const, 1, ""},
+ {"VSTOP", Const, 0, ""},
+ {"VSUSP", Const, 0, ""},
+ {"VSWTC", Const, 0, ""},
+ {"VT0", Const, 1, ""},
+ {"VT1", Const, 1, ""},
+ {"VTDLY", Const, 1, ""},
+ {"VTIME", Const, 0, ""},
+ {"VWERASE", Const, 0, ""},
+ {"VirtualLock", Func, 0, ""},
+ {"VirtualUnlock", Func, 0, ""},
+ {"WAIT_ABANDONED", Const, 0, ""},
+ {"WAIT_FAILED", Const, 0, ""},
+ {"WAIT_OBJECT_0", Const, 0, ""},
+ {"WAIT_TIMEOUT", Const, 0, ""},
+ {"WALL", Const, 0, ""},
+ {"WALLSIG", Const, 1, ""},
+ {"WALTSIG", Const, 1, ""},
+ {"WCLONE", Const, 0, ""},
+ {"WCONTINUED", Const, 0, ""},
+ {"WCOREFLAG", Const, 0, ""},
+ {"WEXITED", Const, 0, ""},
+ {"WLINUXCLONE", Const, 0, ""},
+ {"WNOHANG", Const, 0, ""},
+ {"WNOTHREAD", Const, 0, ""},
+ {"WNOWAIT", Const, 0, ""},
+ {"WNOZOMBIE", Const, 1, ""},
+ {"WOPTSCHECKED", Const, 1, ""},
+ {"WORDSIZE", Const, 0, ""},
+ {"WSABuf", Type, 0, ""},
+ {"WSABuf.Buf", Field, 0, ""},
+ {"WSABuf.Len", Field, 0, ""},
+ {"WSACleanup", Func, 0, ""},
+ {"WSADESCRIPTION_LEN", Const, 0, ""},
+ {"WSAData", Type, 0, ""},
+ {"WSAData.Description", Field, 0, ""},
+ {"WSAData.HighVersion", Field, 0, ""},
+ {"WSAData.MaxSockets", Field, 0, ""},
+ {"WSAData.MaxUdpDg", Field, 0, ""},
+ {"WSAData.SystemStatus", Field, 0, ""},
+ {"WSAData.VendorInfo", Field, 0, ""},
+ {"WSAData.Version", Field, 0, ""},
+ {"WSAEACCES", Const, 2, ""},
+ {"WSAECONNABORTED", Const, 9, ""},
+ {"WSAECONNRESET", Const, 3, ""},
+ {"WSAENOPROTOOPT", Const, 23, ""},
+ {"WSAEnumProtocols", Func, 2, ""},
+ {"WSAID_CONNECTEX", Var, 1, ""},
+ {"WSAIoctl", Func, 0, ""},
+ {"WSAPROTOCOL_LEN", Const, 2, ""},
+ {"WSAProtocolChain", Type, 2, ""},
+ {"WSAProtocolChain.ChainEntries", Field, 2, ""},
+ {"WSAProtocolChain.ChainLen", Field, 2, ""},
+ {"WSAProtocolInfo", Type, 2, ""},
+ {"WSAProtocolInfo.AddressFamily", Field, 2, ""},
+ {"WSAProtocolInfo.CatalogEntryId", Field, 2, ""},
+ {"WSAProtocolInfo.MaxSockAddr", Field, 2, ""},
+ {"WSAProtocolInfo.MessageSize", Field, 2, ""},
+ {"WSAProtocolInfo.MinSockAddr", Field, 2, ""},
+ {"WSAProtocolInfo.NetworkByteOrder", Field, 2, ""},
+ {"WSAProtocolInfo.Protocol", Field, 2, ""},
+ {"WSAProtocolInfo.ProtocolChain", Field, 2, ""},
+ {"WSAProtocolInfo.ProtocolMaxOffset", Field, 2, ""},
+ {"WSAProtocolInfo.ProtocolName", Field, 2, ""},
+ {"WSAProtocolInfo.ProviderFlags", Field, 2, ""},
+ {"WSAProtocolInfo.ProviderId", Field, 2, ""},
+ {"WSAProtocolInfo.ProviderReserved", Field, 2, ""},
+ {"WSAProtocolInfo.SecurityScheme", Field, 2, ""},
+ {"WSAProtocolInfo.ServiceFlags1", Field, 2, ""},
+ {"WSAProtocolInfo.ServiceFlags2", Field, 2, ""},
+ {"WSAProtocolInfo.ServiceFlags3", Field, 2, ""},
+ {"WSAProtocolInfo.ServiceFlags4", Field, 2, ""},
+ {"WSAProtocolInfo.SocketType", Field, 2, ""},
+ {"WSAProtocolInfo.Version", Field, 2, ""},
+ {"WSARecv", Func, 0, ""},
+ {"WSARecvFrom", Func, 0, ""},
+ {"WSASYS_STATUS_LEN", Const, 0, ""},
+ {"WSASend", Func, 0, ""},
+ {"WSASendTo", Func, 0, ""},
+ {"WSASendto", Func, 0, ""},
+ {"WSAStartup", Func, 0, ""},
+ {"WSTOPPED", Const, 0, ""},
+ {"WTRAPPED", Const, 1, ""},
+ {"WUNTRACED", Const, 0, ""},
+ {"Wait4", Func, 0, "func(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error)"},
+ {"WaitForSingleObject", Func, 0, ""},
+ {"WaitStatus", Type, 0, ""},
+ {"WaitStatus.ExitCode", Field, 0, ""},
+ {"Win32FileAttributeData", Type, 0, ""},
+ {"Win32FileAttributeData.CreationTime", Field, 0, ""},
+ {"Win32FileAttributeData.FileAttributes", Field, 0, ""},
+ {"Win32FileAttributeData.FileSizeHigh", Field, 0, ""},
+ {"Win32FileAttributeData.FileSizeLow", Field, 0, ""},
+ {"Win32FileAttributeData.LastAccessTime", Field, 0, ""},
+ {"Win32FileAttributeData.LastWriteTime", Field, 0, ""},
+ {"Win32finddata", Type, 0, ""},
+ {"Win32finddata.AlternateFileName", Field, 0, ""},
+ {"Win32finddata.CreationTime", Field, 0, ""},
+ {"Win32finddata.FileAttributes", Field, 0, ""},
+ {"Win32finddata.FileName", Field, 0, ""},
+ {"Win32finddata.FileSizeHigh", Field, 0, ""},
+ {"Win32finddata.FileSizeLow", Field, 0, ""},
+ {"Win32finddata.LastAccessTime", Field, 0, ""},
+ {"Win32finddata.LastWriteTime", Field, 0, ""},
+ {"Win32finddata.Reserved0", Field, 0, ""},
+ {"Win32finddata.Reserved1", Field, 0, ""},
+ {"Write", Func, 0, "func(fd int, p []byte) (n int, err error)"},
+ {"WriteConsole", Func, 1, ""},
+ {"WriteFile", Func, 0, ""},
+ {"X509_ASN_ENCODING", Const, 0, ""},
+ {"XCASE", Const, 0, ""},
+ {"XP1_CONNECTIONLESS", Const, 2, ""},
+ {"XP1_CONNECT_DATA", Const, 2, ""},
+ {"XP1_DISCONNECT_DATA", Const, 2, ""},
+ {"XP1_EXPEDITED_DATA", Const, 2, ""},
+ {"XP1_GRACEFUL_CLOSE", Const, 2, ""},
+ {"XP1_GUARANTEED_DELIVERY", Const, 2, ""},
+ {"XP1_GUARANTEED_ORDER", Const, 2, ""},
+ {"XP1_IFS_HANDLES", Const, 2, ""},
+ {"XP1_MESSAGE_ORIENTED", Const, 2, ""},
+ {"XP1_MULTIPOINT_CONTROL_PLANE", Const, 2, ""},
+ {"XP1_MULTIPOINT_DATA_PLANE", Const, 2, ""},
+ {"XP1_PARTIAL_MESSAGE", Const, 2, ""},
+ {"XP1_PSEUDO_STREAM", Const, 2, ""},
+ {"XP1_QOS_SUPPORTED", Const, 2, ""},
+ {"XP1_SAN_SUPPORT_SDP", Const, 2, ""},
+ {"XP1_SUPPORT_BROADCAST", Const, 2, ""},
+ {"XP1_SUPPORT_MULTIPOINT", Const, 2, ""},
+ {"XP1_UNI_RECV", Const, 2, ""},
+ {"XP1_UNI_SEND", Const, 2, ""},
+ },
+ "syscall/js": {
+ {"CopyBytesToGo", Func, 0, ""},
+ {"CopyBytesToJS", Func, 0, ""},
+ {"Error", Type, 0, ""},
+ {"Func", Type, 0, ""},
+ {"FuncOf", Func, 0, ""},
+ {"Global", Func, 0, ""},
+ {"Null", Func, 0, ""},
+ {"Type", Type, 0, ""},
+ {"TypeBoolean", Const, 0, ""},
+ {"TypeFunction", Const, 0, ""},
+ {"TypeNull", Const, 0, ""},
+ {"TypeNumber", Const, 0, ""},
+ {"TypeObject", Const, 0, ""},
+ {"TypeString", Const, 0, ""},
+ {"TypeSymbol", Const, 0, ""},
+ {"TypeUndefined", Const, 0, ""},
+ {"Undefined", Func, 0, ""},
+ {"Value", Type, 0, ""},
+ {"ValueError", Type, 0, ""},
+ {"ValueOf", Func, 0, ""},
+ },
+ "testing": {
+ {"(*B).Attr", Method, 25, ""},
+ {"(*B).Chdir", Method, 24, ""},
+ {"(*B).Cleanup", Method, 14, ""},
+ {"(*B).Context", Method, 24, ""},
+ {"(*B).Elapsed", Method, 20, ""},
+ {"(*B).Error", Method, 0, ""},
+ {"(*B).Errorf", Method, 0, ""},
+ {"(*B).Fail", Method, 0, ""},
+ {"(*B).FailNow", Method, 0, ""},
+ {"(*B).Failed", Method, 0, ""},
+ {"(*B).Fatal", Method, 0, ""},
+ {"(*B).Fatalf", Method, 0, ""},
+ {"(*B).Helper", Method, 9, ""},
+ {"(*B).Log", Method, 0, ""},
+ {"(*B).Logf", Method, 0, ""},
+ {"(*B).Loop", Method, 24, ""},
+ {"(*B).Name", Method, 8, ""},
+ {"(*B).Output", Method, 25, ""},
+ {"(*B).ReportAllocs", Method, 1, ""},
+ {"(*B).ReportMetric", Method, 13, ""},
+ {"(*B).ResetTimer", Method, 0, ""},
+ {"(*B).Run", Method, 7, ""},
+ {"(*B).RunParallel", Method, 3, ""},
+ {"(*B).SetBytes", Method, 0, ""},
+ {"(*B).SetParallelism", Method, 3, ""},
+ {"(*B).Setenv", Method, 17, ""},
+ {"(*B).Skip", Method, 1, ""},
+ {"(*B).SkipNow", Method, 1, ""},
+ {"(*B).Skipf", Method, 1, ""},
+ {"(*B).Skipped", Method, 1, ""},
+ {"(*B).StartTimer", Method, 0, ""},
+ {"(*B).StopTimer", Method, 0, ""},
+ {"(*B).TempDir", Method, 15, ""},
+ {"(*F).Add", Method, 18, ""},
+ {"(*F).Attr", Method, 25, ""},
+ {"(*F).Chdir", Method, 24, ""},
+ {"(*F).Cleanup", Method, 18, ""},
+ {"(*F).Context", Method, 24, ""},
+ {"(*F).Error", Method, 18, ""},
+ {"(*F).Errorf", Method, 18, ""},
+ {"(*F).Fail", Method, 18, ""},
+ {"(*F).FailNow", Method, 18, ""},
+ {"(*F).Failed", Method, 18, ""},
+ {"(*F).Fatal", Method, 18, ""},
+ {"(*F).Fatalf", Method, 18, ""},
+ {"(*F).Fuzz", Method, 18, ""},
+ {"(*F).Helper", Method, 18, ""},
+ {"(*F).Log", Method, 18, ""},
+ {"(*F).Logf", Method, 18, ""},
+ {"(*F).Name", Method, 18, ""},
+ {"(*F).Output", Method, 25, ""},
+ {"(*F).Setenv", Method, 18, ""},
+ {"(*F).Skip", Method, 18, ""},
+ {"(*F).SkipNow", Method, 18, ""},
+ {"(*F).Skipf", Method, 18, ""},
+ {"(*F).Skipped", Method, 18, ""},
+ {"(*F).TempDir", Method, 18, ""},
+ {"(*M).Run", Method, 4, ""},
+ {"(*PB).Next", Method, 3, ""},
+ {"(*T).Attr", Method, 25, ""},
+ {"(*T).Chdir", Method, 24, ""},
+ {"(*T).Cleanup", Method, 14, ""},
+ {"(*T).Context", Method, 24, ""},
+ {"(*T).Deadline", Method, 15, ""},
+ {"(*T).Error", Method, 0, ""},
+ {"(*T).Errorf", Method, 0, ""},
+ {"(*T).Fail", Method, 0, ""},
+ {"(*T).FailNow", Method, 0, ""},
+ {"(*T).Failed", Method, 0, ""},
+ {"(*T).Fatal", Method, 0, ""},
+ {"(*T).Fatalf", Method, 0, ""},
+ {"(*T).Helper", Method, 9, ""},
+ {"(*T).Log", Method, 0, ""},
+ {"(*T).Logf", Method, 0, ""},
+ {"(*T).Name", Method, 8, ""},
+ {"(*T).Output", Method, 25, ""},
+ {"(*T).Parallel", Method, 0, ""},
+ {"(*T).Run", Method, 7, ""},
+ {"(*T).Setenv", Method, 17, ""},
+ {"(*T).Skip", Method, 1, ""},
+ {"(*T).SkipNow", Method, 1, ""},
+ {"(*T).Skipf", Method, 1, ""},
+ {"(*T).Skipped", Method, 1, ""},
+ {"(*T).TempDir", Method, 15, ""},
+ {"(BenchmarkResult).AllocedBytesPerOp", Method, 1, ""},
+ {"(BenchmarkResult).AllocsPerOp", Method, 1, ""},
+ {"(BenchmarkResult).MemString", Method, 1, ""},
+ {"(BenchmarkResult).NsPerOp", Method, 0, ""},
+ {"(BenchmarkResult).String", Method, 0, ""},
+ {"AllocsPerRun", Func, 1, "func(runs int, f func()) (avg float64)"},
+ {"B", Type, 0, ""},
+ {"B.N", Field, 0, ""},
+ {"Benchmark", Func, 0, "func(f func(b *B)) BenchmarkResult"},
+ {"BenchmarkResult", Type, 0, ""},
+ {"BenchmarkResult.Bytes", Field, 0, ""},
+ {"BenchmarkResult.Extra", Field, 13, ""},
+ {"BenchmarkResult.MemAllocs", Field, 1, ""},
+ {"BenchmarkResult.MemBytes", Field, 1, ""},
+ {"BenchmarkResult.N", Field, 0, ""},
+ {"BenchmarkResult.T", Field, 0, ""},
+ {"Cover", Type, 2, ""},
+ {"Cover.Blocks", Field, 2, ""},
+ {"Cover.Counters", Field, 2, ""},
+ {"Cover.CoveredPackages", Field, 2, ""},
+ {"Cover.Mode", Field, 2, ""},
+ {"CoverBlock", Type, 2, ""},
+ {"CoverBlock.Col0", Field, 2, ""},
+ {"CoverBlock.Col1", Field, 2, ""},
+ {"CoverBlock.Line0", Field, 2, ""},
+ {"CoverBlock.Line1", Field, 2, ""},
+ {"CoverBlock.Stmts", Field, 2, ""},
+ {"CoverMode", Func, 8, "func() string"},
+ {"Coverage", Func, 4, "func() float64"},
+ {"F", Type, 18, ""},
+ {"Init", Func, 13, "func()"},
+ {"InternalBenchmark", Type, 0, ""},
+ {"InternalBenchmark.F", Field, 0, ""},
+ {"InternalBenchmark.Name", Field, 0, ""},
+ {"InternalExample", Type, 0, ""},
+ {"InternalExample.F", Field, 0, ""},
+ {"InternalExample.Name", Field, 0, ""},
+ {"InternalExample.Output", Field, 0, ""},
+ {"InternalExample.Unordered", Field, 7, ""},
+ {"InternalFuzzTarget", Type, 18, ""},
+ {"InternalFuzzTarget.Fn", Field, 18, ""},
+ {"InternalFuzzTarget.Name", Field, 18, ""},
+ {"InternalTest", Type, 0, ""},
+ {"InternalTest.F", Field, 0, ""},
+ {"InternalTest.Name", Field, 0, ""},
+ {"M", Type, 4, ""},
+ {"Main", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)"},
+ {"MainStart", Func, 4, "func(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, fuzzTargets []InternalFuzzTarget, examples []InternalExample) *M"},
+ {"PB", Type, 3, ""},
+ {"RegisterCover", Func, 2, "func(c Cover)"},
+ {"RunBenchmarks", Func, 0, "func(matchString func(pat string, str string) (bool, error), benchmarks []InternalBenchmark)"},
+ {"RunExamples", Func, 0, "func(matchString func(pat string, str string) (bool, error), examples []InternalExample) (ok bool)"},
+ {"RunTests", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest) (ok bool)"},
+ {"Short", Func, 0, "func() bool"},
+ {"T", Type, 0, ""},
+ {"TB", Type, 2, ""},
+ {"Testing", Func, 21, "func() bool"},
+ {"Verbose", Func, 1, "func() bool"},
+ },
+ "testing/fstest": {
+ {"(MapFS).Glob", Method, 16, ""},
+ {"(MapFS).Lstat", Method, 25, ""},
+ {"(MapFS).Open", Method, 16, ""},
+ {"(MapFS).ReadDir", Method, 16, ""},
+ {"(MapFS).ReadFile", Method, 16, ""},
+ {"(MapFS).ReadLink", Method, 25, ""},
+ {"(MapFS).Stat", Method, 16, ""},
+ {"(MapFS).Sub", Method, 16, ""},
+ {"MapFS", Type, 16, ""},
+ {"MapFile", Type, 16, ""},
+ {"MapFile.Data", Field, 16, ""},
+ {"MapFile.ModTime", Field, 16, ""},
+ {"MapFile.Mode", Field, 16, ""},
+ {"MapFile.Sys", Field, 16, ""},
+ {"TestFS", Func, 16, "func(fsys fs.FS, expected ...string) error"},
+ },
+ "testing/iotest": {
+ {"DataErrReader", Func, 0, "func(r io.Reader) io.Reader"},
+ {"ErrReader", Func, 16, "func(err error) io.Reader"},
+ {"ErrTimeout", Var, 0, ""},
+ {"HalfReader", Func, 0, "func(r io.Reader) io.Reader"},
+ {"NewReadLogger", Func, 0, "func(prefix string, r io.Reader) io.Reader"},
+ {"NewWriteLogger", Func, 0, "func(prefix string, w io.Writer) io.Writer"},
+ {"OneByteReader", Func, 0, "func(r io.Reader) io.Reader"},
+ {"TestReader", Func, 16, "func(r io.Reader, content []byte) error"},
+ {"TimeoutReader", Func, 0, "func(r io.Reader) io.Reader"},
+ {"TruncateWriter", Func, 0, "func(w io.Writer, n int64) io.Writer"},
+ },
+ "testing/quick": {
+ {"(*CheckEqualError).Error", Method, 0, ""},
+ {"(*CheckError).Error", Method, 0, ""},
+ {"(SetupError).Error", Method, 0, ""},
+ {"Check", Func, 0, "func(f any, config *Config) error"},
+ {"CheckEqual", Func, 0, "func(f any, g any, config *Config) error"},
+ {"CheckEqualError", Type, 0, ""},
+ {"CheckEqualError.CheckError", Field, 0, ""},
+ {"CheckEqualError.Out1", Field, 0, ""},
+ {"CheckEqualError.Out2", Field, 0, ""},
+ {"CheckError", Type, 0, ""},
+ {"CheckError.Count", Field, 0, ""},
+ {"CheckError.In", Field, 0, ""},
+ {"Config", Type, 0, ""},
+ {"Config.MaxCount", Field, 0, ""},
+ {"Config.MaxCountScale", Field, 0, ""},
+ {"Config.Rand", Field, 0, ""},
+ {"Config.Values", Field, 0, ""},
+ {"Generator", Type, 0, ""},
+ {"SetupError", Type, 0, ""},
+ {"Value", Func, 0, "func(t reflect.Type, rand *rand.Rand) (value reflect.Value, ok bool)"},
+ },
+ "testing/slogtest": {
+ {"Run", Func, 22, "func(t *testing.T, newHandler func(*testing.T) slog.Handler, result func(*testing.T) map[string]any)"},
+ {"TestHandler", Func, 21, "func(h slog.Handler, results func() []map[string]any) error"},
+ },
+ "testing/synctest": {
+ {"Test", Func, 25, "func(t *testing.T, f func(*testing.T))"},
+ {"Wait", Func, 25, "func()"},
+ },
+ "text/scanner": {
+ {"(*Position).IsValid", Method, 0, ""},
+ {"(*Scanner).Init", Method, 0, ""},
+ {"(*Scanner).IsValid", Method, 0, ""},
+ {"(*Scanner).Next", Method, 0, ""},
+ {"(*Scanner).Peek", Method, 0, ""},
+ {"(*Scanner).Pos", Method, 0, ""},
+ {"(*Scanner).Scan", Method, 0, ""},
+ {"(*Scanner).TokenText", Method, 0, ""},
+ {"(Position).String", Method, 0, ""},
+ {"(Scanner).String", Method, 0, ""},
+ {"Char", Const, 0, ""},
+ {"Comment", Const, 0, ""},
+ {"EOF", Const, 0, ""},
+ {"Float", Const, 0, ""},
+ {"GoTokens", Const, 0, ""},
+ {"GoWhitespace", Const, 0, ""},
+ {"Ident", Const, 0, ""},
+ {"Int", Const, 0, ""},
+ {"Position", Type, 0, ""},
+ {"Position.Column", Field, 0, ""},
+ {"Position.Filename", Field, 0, ""},
+ {"Position.Line", Field, 0, ""},
+ {"Position.Offset", Field, 0, ""},
+ {"RawString", Const, 0, ""},
+ {"ScanChars", Const, 0, ""},
+ {"ScanComments", Const, 0, ""},
+ {"ScanFloats", Const, 0, ""},
+ {"ScanIdents", Const, 0, ""},
+ {"ScanInts", Const, 0, ""},
+ {"ScanRawStrings", Const, 0, ""},
+ {"ScanStrings", Const, 0, ""},
+ {"Scanner", Type, 0, ""},
+ {"Scanner.Error", Field, 0, ""},
+ {"Scanner.ErrorCount", Field, 0, ""},
+ {"Scanner.IsIdentRune", Field, 4, ""},
+ {"Scanner.Mode", Field, 0, ""},
+ {"Scanner.Position", Field, 0, ""},
+ {"Scanner.Whitespace", Field, 0, ""},
+ {"SkipComments", Const, 0, ""},
+ {"String", Const, 0, ""},
+ {"TokenString", Func, 0, "func(tok rune) string"},
+ },
+ "text/tabwriter": {
+ {"(*Writer).Flush", Method, 0, ""},
+ {"(*Writer).Init", Method, 0, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"AlignRight", Const, 0, ""},
+ {"Debug", Const, 0, ""},
+ {"DiscardEmptyColumns", Const, 0, ""},
+ {"Escape", Const, 0, ""},
+ {"FilterHTML", Const, 0, ""},
+ {"NewWriter", Func, 0, "func(output io.Writer, minwidth int, tabwidth int, padding int, padchar byte, flags uint) *Writer"},
+ {"StripEscape", Const, 0, ""},
+ {"TabIndent", Const, 0, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "text/template": {
+ {"(*Template).AddParseTree", Method, 0, ""},
+ {"(*Template).Clone", Method, 0, ""},
+ {"(*Template).DefinedTemplates", Method, 5, ""},
+ {"(*Template).Delims", Method, 0, ""},
+ {"(*Template).Execute", Method, 0, ""},
+ {"(*Template).ExecuteTemplate", Method, 0, ""},
+ {"(*Template).Funcs", Method, 0, ""},
+ {"(*Template).Lookup", Method, 0, ""},
+ {"(*Template).Name", Method, 0, ""},
+ {"(*Template).New", Method, 0, ""},
+ {"(*Template).Option", Method, 5, ""},
+ {"(*Template).Parse", Method, 0, ""},
+ {"(*Template).ParseFS", Method, 16, ""},
+ {"(*Template).ParseFiles", Method, 0, ""},
+ {"(*Template).ParseGlob", Method, 0, ""},
+ {"(*Template).Templates", Method, 0, ""},
+ {"(ExecError).Error", Method, 6, ""},
+ {"(ExecError).Unwrap", Method, 13, ""},
+ {"(Template).Copy", Method, 2, ""},
+ {"(Template).ErrorContext", Method, 1, ""},
+ {"ExecError", Type, 6, ""},
+ {"ExecError.Err", Field, 6, ""},
+ {"ExecError.Name", Field, 6, ""},
+ {"FuncMap", Type, 0, ""},
+ {"HTMLEscape", Func, 0, "func(w io.Writer, b []byte)"},
+ {"HTMLEscapeString", Func, 0, "func(s string) string"},
+ {"HTMLEscaper", Func, 0, "func(args ...any) string"},
+ {"IsTrue", Func, 6, "func(val any) (truth bool, ok bool)"},
+ {"JSEscape", Func, 0, "func(w io.Writer, b []byte)"},
+ {"JSEscapeString", Func, 0, "func(s string) string"},
+ {"JSEscaper", Func, 0, "func(args ...any) string"},
+ {"Must", Func, 0, "func(t *Template, err error) *Template"},
+ {"New", Func, 0, "func(name string) *Template"},
+ {"ParseFS", Func, 16, "func(fsys fs.FS, patterns ...string) (*Template, error)"},
+ {"ParseFiles", Func, 0, "func(filenames ...string) (*Template, error)"},
+ {"ParseGlob", Func, 0, "func(pattern string) (*Template, error)"},
+ {"Template", Type, 0, ""},
+ {"Template.Tree", Field, 0, ""},
+ {"URLQueryEscaper", Func, 0, "func(args ...any) string"},
+ },
+ "text/template/parse": {
+ {"(*ActionNode).Copy", Method, 0, ""},
+ {"(*ActionNode).String", Method, 0, ""},
+ {"(*BoolNode).Copy", Method, 0, ""},
+ {"(*BoolNode).String", Method, 0, ""},
+ {"(*BranchNode).Copy", Method, 4, ""},
+ {"(*BranchNode).String", Method, 0, ""},
+ {"(*BreakNode).Copy", Method, 18, ""},
+ {"(*BreakNode).String", Method, 18, ""},
+ {"(*ChainNode).Add", Method, 1, ""},
+ {"(*ChainNode).Copy", Method, 1, ""},
+ {"(*ChainNode).String", Method, 1, ""},
+ {"(*CommandNode).Copy", Method, 0, ""},
+ {"(*CommandNode).String", Method, 0, ""},
+ {"(*CommentNode).Copy", Method, 16, ""},
+ {"(*CommentNode).String", Method, 16, ""},
+ {"(*ContinueNode).Copy", Method, 18, ""},
+ {"(*ContinueNode).String", Method, 18, ""},
+ {"(*DotNode).Copy", Method, 0, ""},
+ {"(*DotNode).String", Method, 0, ""},
+ {"(*DotNode).Type", Method, 0, ""},
+ {"(*FieldNode).Copy", Method, 0, ""},
+ {"(*FieldNode).String", Method, 0, ""},
+ {"(*IdentifierNode).Copy", Method, 0, ""},
+ {"(*IdentifierNode).SetPos", Method, 1, ""},
+ {"(*IdentifierNode).SetTree", Method, 4, ""},
+ {"(*IdentifierNode).String", Method, 0, ""},
+ {"(*IfNode).Copy", Method, 0, ""},
+ {"(*IfNode).String", Method, 0, ""},
+ {"(*ListNode).Copy", Method, 0, ""},
+ {"(*ListNode).CopyList", Method, 0, ""},
+ {"(*ListNode).String", Method, 0, ""},
+ {"(*NilNode).Copy", Method, 1, ""},
+ {"(*NilNode).String", Method, 1, ""},
+ {"(*NilNode).Type", Method, 1, ""},
+ {"(*NumberNode).Copy", Method, 0, ""},
+ {"(*NumberNode).String", Method, 0, ""},
+ {"(*PipeNode).Copy", Method, 0, ""},
+ {"(*PipeNode).CopyPipe", Method, 0, ""},
+ {"(*PipeNode).String", Method, 0, ""},
+ {"(*RangeNode).Copy", Method, 0, ""},
+ {"(*RangeNode).String", Method, 0, ""},
+ {"(*StringNode).Copy", Method, 0, ""},
+ {"(*StringNode).String", Method, 0, ""},
+ {"(*TemplateNode).Copy", Method, 0, ""},
+ {"(*TemplateNode).String", Method, 0, ""},
+ {"(*TextNode).Copy", Method, 0, ""},
+ {"(*TextNode).String", Method, 0, ""},
+ {"(*Tree).Copy", Method, 2, ""},
+ {"(*Tree).ErrorContext", Method, 1, ""},
+ {"(*Tree).Parse", Method, 0, ""},
+ {"(*VariableNode).Copy", Method, 0, ""},
+ {"(*VariableNode).String", Method, 0, ""},
+ {"(*WithNode).Copy", Method, 0, ""},
+ {"(*WithNode).String", Method, 0, ""},
+ {"(ActionNode).Position", Method, 1, ""},
+ {"(ActionNode).Type", Method, 0, ""},
+ {"(BoolNode).Position", Method, 1, ""},
+ {"(BoolNode).Type", Method, 0, ""},
+ {"(BranchNode).Position", Method, 1, ""},
+ {"(BranchNode).Type", Method, 0, ""},
+ {"(BreakNode).Position", Method, 18, ""},
+ {"(BreakNode).Type", Method, 18, ""},
+ {"(ChainNode).Position", Method, 1, ""},
+ {"(ChainNode).Type", Method, 1, ""},
+ {"(CommandNode).Position", Method, 1, ""},
+ {"(CommandNode).Type", Method, 0, ""},
+ {"(CommentNode).Position", Method, 16, ""},
+ {"(CommentNode).Type", Method, 16, ""},
+ {"(ContinueNode).Position", Method, 18, ""},
+ {"(ContinueNode).Type", Method, 18, ""},
+ {"(DotNode).Position", Method, 1, ""},
+ {"(FieldNode).Position", Method, 1, ""},
+ {"(FieldNode).Type", Method, 0, ""},
+ {"(IdentifierNode).Position", Method, 1, ""},
+ {"(IdentifierNode).Type", Method, 0, ""},
+ {"(IfNode).Position", Method, 1, ""},
+ {"(IfNode).Type", Method, 0, ""},
+ {"(ListNode).Position", Method, 1, ""},
+ {"(ListNode).Type", Method, 0, ""},
+ {"(NilNode).Position", Method, 1, ""},
+ {"(NodeType).Type", Method, 0, ""},
+ {"(NumberNode).Position", Method, 1, ""},
+ {"(NumberNode).Type", Method, 0, ""},
+ {"(PipeNode).Position", Method, 1, ""},
+ {"(PipeNode).Type", Method, 0, ""},
+ {"(Pos).Position", Method, 1, ""},
+ {"(RangeNode).Position", Method, 1, ""},
+ {"(RangeNode).Type", Method, 0, ""},
+ {"(StringNode).Position", Method, 1, ""},
+ {"(StringNode).Type", Method, 0, ""},
+ {"(TemplateNode).Position", Method, 1, ""},
+ {"(TemplateNode).Type", Method, 0, ""},
+ {"(TextNode).Position", Method, 1, ""},
+ {"(TextNode).Type", Method, 0, ""},
+ {"(VariableNode).Position", Method, 1, ""},
+ {"(VariableNode).Type", Method, 0, ""},
+ {"(WithNode).Position", Method, 1, ""},
+ {"(WithNode).Type", Method, 0, ""},
+ {"ActionNode", Type, 0, ""},
+ {"ActionNode.Line", Field, 0, ""},
+ {"ActionNode.NodeType", Field, 0, ""},
+ {"ActionNode.Pipe", Field, 0, ""},
+ {"ActionNode.Pos", Field, 1, ""},
+ {"BoolNode", Type, 0, ""},
+ {"BoolNode.NodeType", Field, 0, ""},
+ {"BoolNode.Pos", Field, 1, ""},
+ {"BoolNode.True", Field, 0, ""},
+ {"BranchNode", Type, 0, ""},
+ {"BranchNode.ElseList", Field, 0, ""},
+ {"BranchNode.Line", Field, 0, ""},
+ {"BranchNode.List", Field, 0, ""},
+ {"BranchNode.NodeType", Field, 0, ""},
+ {"BranchNode.Pipe", Field, 0, ""},
+ {"BranchNode.Pos", Field, 1, ""},
+ {"BreakNode", Type, 18, ""},
+ {"BreakNode.Line", Field, 18, ""},
+ {"BreakNode.NodeType", Field, 18, ""},
+ {"BreakNode.Pos", Field, 18, ""},
+ {"ChainNode", Type, 1, ""},
+ {"ChainNode.Field", Field, 1, ""},
+ {"ChainNode.Node", Field, 1, ""},
+ {"ChainNode.NodeType", Field, 1, ""},
+ {"ChainNode.Pos", Field, 1, ""},
+ {"CommandNode", Type, 0, ""},
+ {"CommandNode.Args", Field, 0, ""},
+ {"CommandNode.NodeType", Field, 0, ""},
+ {"CommandNode.Pos", Field, 1, ""},
+ {"CommentNode", Type, 16, ""},
+ {"CommentNode.NodeType", Field, 16, ""},
+ {"CommentNode.Pos", Field, 16, ""},
+ {"CommentNode.Text", Field, 16, ""},
+ {"ContinueNode", Type, 18, ""},
+ {"ContinueNode.Line", Field, 18, ""},
+ {"ContinueNode.NodeType", Field, 18, ""},
+ {"ContinueNode.Pos", Field, 18, ""},
+ {"DotNode", Type, 0, ""},
+ {"DotNode.NodeType", Field, 4, ""},
+ {"DotNode.Pos", Field, 1, ""},
+ {"FieldNode", Type, 0, ""},
+ {"FieldNode.Ident", Field, 0, ""},
+ {"FieldNode.NodeType", Field, 0, ""},
+ {"FieldNode.Pos", Field, 1, ""},
+ {"IdentifierNode", Type, 0, ""},
+ {"IdentifierNode.Ident", Field, 0, ""},
+ {"IdentifierNode.NodeType", Field, 0, ""},
+ {"IdentifierNode.Pos", Field, 1, ""},
+ {"IfNode", Type, 0, ""},
+ {"IfNode.BranchNode", Field, 0, ""},
+ {"IsEmptyTree", Func, 0, "func(n Node) bool"},
+ {"ListNode", Type, 0, ""},
+ {"ListNode.NodeType", Field, 0, ""},
+ {"ListNode.Nodes", Field, 0, ""},
+ {"ListNode.Pos", Field, 1, ""},
+ {"Mode", Type, 16, ""},
+ {"New", Func, 0, "func(name string, funcs ...map[string]any) *Tree"},
+ {"NewIdentifier", Func, 0, "func(ident string) *IdentifierNode"},
+ {"NilNode", Type, 1, ""},
+ {"NilNode.NodeType", Field, 4, ""},
+ {"NilNode.Pos", Field, 1, ""},
+ {"Node", Type, 0, ""},
+ {"NodeAction", Const, 0, ""},
+ {"NodeBool", Const, 0, ""},
+ {"NodeBreak", Const, 18, ""},
+ {"NodeChain", Const, 1, ""},
+ {"NodeCommand", Const, 0, ""},
+ {"NodeComment", Const, 16, ""},
+ {"NodeContinue", Const, 18, ""},
+ {"NodeDot", Const, 0, ""},
+ {"NodeField", Const, 0, ""},
+ {"NodeIdentifier", Const, 0, ""},
+ {"NodeIf", Const, 0, ""},
+ {"NodeList", Const, 0, ""},
+ {"NodeNil", Const, 1, ""},
+ {"NodeNumber", Const, 0, ""},
+ {"NodePipe", Const, 0, ""},
+ {"NodeRange", Const, 0, ""},
+ {"NodeString", Const, 0, ""},
+ {"NodeTemplate", Const, 0, ""},
+ {"NodeText", Const, 0, ""},
+ {"NodeType", Type, 0, ""},
+ {"NodeVariable", Const, 0, ""},
+ {"NodeWith", Const, 0, ""},
+ {"NumberNode", Type, 0, ""},
+ {"NumberNode.Complex128", Field, 0, ""},
+ {"NumberNode.Float64", Field, 0, ""},
+ {"NumberNode.Int64", Field, 0, ""},
+ {"NumberNode.IsComplex", Field, 0, ""},
+ {"NumberNode.IsFloat", Field, 0, ""},
+ {"NumberNode.IsInt", Field, 0, ""},
+ {"NumberNode.IsUint", Field, 0, ""},
+ {"NumberNode.NodeType", Field, 0, ""},
+ {"NumberNode.Pos", Field, 1, ""},
+ {"NumberNode.Text", Field, 0, ""},
+ {"NumberNode.Uint64", Field, 0, ""},
+ {"Parse", Func, 0, "func(name string, text string, leftDelim string, rightDelim string, funcs ...map[string]any) (map[string]*Tree, error)"},
+ {"ParseComments", Const, 16, ""},
+ {"PipeNode", Type, 0, ""},
+ {"PipeNode.Cmds", Field, 0, ""},
+ {"PipeNode.Decl", Field, 0, ""},
+ {"PipeNode.IsAssign", Field, 11, ""},
+ {"PipeNode.Line", Field, 0, ""},
+ {"PipeNode.NodeType", Field, 0, ""},
+ {"PipeNode.Pos", Field, 1, ""},
+ {"Pos", Type, 1, ""},
+ {"RangeNode", Type, 0, ""},
+ {"RangeNode.BranchNode", Field, 0, ""},
+ {"SkipFuncCheck", Const, 17, ""},
+ {"StringNode", Type, 0, ""},
+ {"StringNode.NodeType", Field, 0, ""},
+ {"StringNode.Pos", Field, 1, ""},
+ {"StringNode.Quoted", Field, 0, ""},
+ {"StringNode.Text", Field, 0, ""},
+ {"TemplateNode", Type, 0, ""},
+ {"TemplateNode.Line", Field, 0, ""},
+ {"TemplateNode.Name", Field, 0, ""},
+ {"TemplateNode.NodeType", Field, 0, ""},
+ {"TemplateNode.Pipe", Field, 0, ""},
+ {"TemplateNode.Pos", Field, 1, ""},
+ {"TextNode", Type, 0, ""},
+ {"TextNode.NodeType", Field, 0, ""},
+ {"TextNode.Pos", Field, 1, ""},
+ {"TextNode.Text", Field, 0, ""},
+ {"Tree", Type, 0, ""},
+ {"Tree.Mode", Field, 16, ""},
+ {"Tree.Name", Field, 0, ""},
+ {"Tree.ParseName", Field, 1, ""},
+ {"Tree.Root", Field, 0, ""},
+ {"VariableNode", Type, 0, ""},
+ {"VariableNode.Ident", Field, 0, ""},
+ {"VariableNode.NodeType", Field, 0, ""},
+ {"VariableNode.Pos", Field, 1, ""},
+ {"WithNode", Type, 0, ""},
+ {"WithNode.BranchNode", Field, 0, ""},
+ },
+ "time": {
+ {"(*Location).String", Method, 0, ""},
+ {"(*ParseError).Error", Method, 0, ""},
+ {"(*Ticker).Reset", Method, 15, ""},
+ {"(*Ticker).Stop", Method, 0, ""},
+ {"(*Time).GobDecode", Method, 0, ""},
+ {"(*Time).UnmarshalBinary", Method, 2, ""},
+ {"(*Time).UnmarshalJSON", Method, 0, ""},
+ {"(*Time).UnmarshalText", Method, 2, ""},
+ {"(*Timer).Reset", Method, 1, ""},
+ {"(*Timer).Stop", Method, 0, ""},
+ {"(Duration).Abs", Method, 19, ""},
+ {"(Duration).Hours", Method, 0, ""},
+ {"(Duration).Microseconds", Method, 13, ""},
+ {"(Duration).Milliseconds", Method, 13, ""},
+ {"(Duration).Minutes", Method, 0, ""},
+ {"(Duration).Nanoseconds", Method, 0, ""},
+ {"(Duration).Round", Method, 9, ""},
+ {"(Duration).Seconds", Method, 0, ""},
+ {"(Duration).String", Method, 0, ""},
+ {"(Duration).Truncate", Method, 9, ""},
+ {"(Month).String", Method, 0, ""},
+ {"(Time).Add", Method, 0, ""},
+ {"(Time).AddDate", Method, 0, ""},
+ {"(Time).After", Method, 0, ""},
+ {"(Time).AppendBinary", Method, 24, ""},
+ {"(Time).AppendFormat", Method, 5, ""},
+ {"(Time).AppendText", Method, 24, ""},
+ {"(Time).Before", Method, 0, ""},
+ {"(Time).Clock", Method, 0, ""},
+ {"(Time).Compare", Method, 20, ""},
+ {"(Time).Date", Method, 0, ""},
+ {"(Time).Day", Method, 0, ""},
+ {"(Time).Equal", Method, 0, ""},
+ {"(Time).Format", Method, 0, ""},
+ {"(Time).GoString", Method, 17, ""},
+ {"(Time).GobEncode", Method, 0, ""},
+ {"(Time).Hour", Method, 0, ""},
+ {"(Time).ISOWeek", Method, 0, ""},
+ {"(Time).In", Method, 0, ""},
+ {"(Time).IsDST", Method, 17, ""},
+ {"(Time).IsZero", Method, 0, ""},
+ {"(Time).Local", Method, 0, ""},
+ {"(Time).Location", Method, 0, ""},
+ {"(Time).MarshalBinary", Method, 2, ""},
+ {"(Time).MarshalJSON", Method, 0, ""},
+ {"(Time).MarshalText", Method, 2, ""},
+ {"(Time).Minute", Method, 0, ""},
+ {"(Time).Month", Method, 0, ""},
+ {"(Time).Nanosecond", Method, 0, ""},
+ {"(Time).Round", Method, 1, ""},
+ {"(Time).Second", Method, 0, ""},
+ {"(Time).String", Method, 0, ""},
+ {"(Time).Sub", Method, 0, ""},
+ {"(Time).Truncate", Method, 1, ""},
+ {"(Time).UTC", Method, 0, ""},
+ {"(Time).Unix", Method, 0, ""},
+ {"(Time).UnixMicro", Method, 17, ""},
+ {"(Time).UnixMilli", Method, 17, ""},
+ {"(Time).UnixNano", Method, 0, ""},
+ {"(Time).Weekday", Method, 0, ""},
+ {"(Time).Year", Method, 0, ""},
+ {"(Time).YearDay", Method, 1, ""},
+ {"(Time).Zone", Method, 0, ""},
+ {"(Time).ZoneBounds", Method, 19, ""},
+ {"(Weekday).String", Method, 0, ""},
+ {"ANSIC", Const, 0, ""},
+ {"After", Func, 0, "func(d Duration) <-chan Time"},
+ {"AfterFunc", Func, 0, "func(d Duration, f func()) *Timer"},
+ {"April", Const, 0, ""},
+ {"August", Const, 0, ""},
+ {"Date", Func, 0, "func(year int, month Month, day int, hour int, min int, sec int, nsec int, loc *Location) Time"},
+ {"DateOnly", Const, 20, ""},
+ {"DateTime", Const, 20, ""},
+ {"December", Const, 0, ""},
+ {"Duration", Type, 0, ""},
+ {"February", Const, 0, ""},
+ {"FixedZone", Func, 0, "func(name string, offset int) *Location"},
+ {"Friday", Const, 0, ""},
+ {"Hour", Const, 0, ""},
+ {"January", Const, 0, ""},
+ {"July", Const, 0, ""},
+ {"June", Const, 0, ""},
+ {"Kitchen", Const, 0, ""},
+ {"Layout", Const, 17, ""},
+ {"LoadLocation", Func, 0, "func(name string) (*Location, error)"},
+ {"LoadLocationFromTZData", Func, 10, "func(name string, data []byte) (*Location, error)"},
+ {"Local", Var, 0, ""},
+ {"Location", Type, 0, ""},
+ {"March", Const, 0, ""},
+ {"May", Const, 0, ""},
+ {"Microsecond", Const, 0, ""},
+ {"Millisecond", Const, 0, ""},
+ {"Minute", Const, 0, ""},
+ {"Monday", Const, 0, ""},
+ {"Month", Type, 0, ""},
+ {"Nanosecond", Const, 0, ""},
+ {"NewTicker", Func, 0, "func(d Duration) *Ticker"},
+ {"NewTimer", Func, 0, "func(d Duration) *Timer"},
+ {"November", Const, 0, ""},
+ {"Now", Func, 0, "func() Time"},
+ {"October", Const, 0, ""},
+ {"Parse", Func, 0, "func(layout string, value string) (Time, error)"},
+ {"ParseDuration", Func, 0, "func(s string) (Duration, error)"},
+ {"ParseError", Type, 0, ""},
+ {"ParseError.Layout", Field, 0, ""},
+ {"ParseError.LayoutElem", Field, 0, ""},
+ {"ParseError.Message", Field, 0, ""},
+ {"ParseError.Value", Field, 0, ""},
+ {"ParseError.ValueElem", Field, 0, ""},
+ {"ParseInLocation", Func, 1, "func(layout string, value string, loc *Location) (Time, error)"},
+ {"RFC1123", Const, 0, ""},
+ {"RFC1123Z", Const, 0, ""},
+ {"RFC3339", Const, 0, ""},
+ {"RFC3339Nano", Const, 0, ""},
+ {"RFC822", Const, 0, ""},
+ {"RFC822Z", Const, 0, ""},
+ {"RFC850", Const, 0, ""},
+ {"RubyDate", Const, 0, ""},
+ {"Saturday", Const, 0, ""},
+ {"Second", Const, 0, ""},
+ {"September", Const, 0, ""},
+ {"Since", Func, 0, "func(t Time) Duration"},
+ {"Sleep", Func, 0, "func(d Duration)"},
+ {"Stamp", Const, 0, ""},
+ {"StampMicro", Const, 0, ""},
+ {"StampMilli", Const, 0, ""},
+ {"StampNano", Const, 0, ""},
+ {"Sunday", Const, 0, ""},
+ {"Thursday", Const, 0, ""},
+ {"Tick", Func, 0, "func(d Duration) <-chan Time"},
+ {"Ticker", Type, 0, ""},
+ {"Ticker.C", Field, 0, ""},
+ {"Time", Type, 0, ""},
+ {"TimeOnly", Const, 20, ""},
+ {"Timer", Type, 0, ""},
+ {"Timer.C", Field, 0, ""},
+ {"Tuesday", Const, 0, ""},
+ {"UTC", Var, 0, ""},
+ {"Unix", Func, 0, "func(sec int64, nsec int64) Time"},
+ {"UnixDate", Const, 0, ""},
+ {"UnixMicro", Func, 17, "func(usec int64) Time"},
+ {"UnixMilli", Func, 17, "func(msec int64) Time"},
+ {"Until", Func, 8, "func(t Time) Duration"},
+ {"Wednesday", Const, 0, ""},
+ {"Weekday", Type, 0, ""},
+ },
+ "unicode": {
+ {"(SpecialCase).ToLower", Method, 0, ""},
+ {"(SpecialCase).ToTitle", Method, 0, ""},
+ {"(SpecialCase).ToUpper", Method, 0, ""},
+ {"ASCII_Hex_Digit", Var, 0, ""},
+ {"Adlam", Var, 7, ""},
+ {"Ahom", Var, 5, ""},
+ {"Anatolian_Hieroglyphs", Var, 5, ""},
+ {"Arabic", Var, 0, ""},
+ {"Armenian", Var, 0, ""},
+ {"Avestan", Var, 0, ""},
+ {"AzeriCase", Var, 0, ""},
+ {"Balinese", Var, 0, ""},
+ {"Bamum", Var, 0, ""},
+ {"Bassa_Vah", Var, 4, ""},
+ {"Batak", Var, 0, ""},
+ {"Bengali", Var, 0, ""},
+ {"Bhaiksuki", Var, 7, ""},
+ {"Bidi_Control", Var, 0, ""},
+ {"Bopomofo", Var, 0, ""},
+ {"Brahmi", Var, 0, ""},
+ {"Braille", Var, 0, ""},
+ {"Buginese", Var, 0, ""},
+ {"Buhid", Var, 0, ""},
+ {"C", Var, 0, ""},
+ {"Canadian_Aboriginal", Var, 0, ""},
+ {"Carian", Var, 0, ""},
+ {"CaseRange", Type, 0, ""},
+ {"CaseRange.Delta", Field, 0, ""},
+ {"CaseRange.Hi", Field, 0, ""},
+ {"CaseRange.Lo", Field, 0, ""},
+ {"CaseRanges", Var, 0, ""},
+ {"Categories", Var, 0, ""},
+ {"CategoryAliases", Var, 25, ""},
+ {"Caucasian_Albanian", Var, 4, ""},
+ {"Cc", Var, 0, ""},
+ {"Cf", Var, 0, ""},
+ {"Chakma", Var, 1, ""},
+ {"Cham", Var, 0, ""},
+ {"Cherokee", Var, 0, ""},
+ {"Chorasmian", Var, 16, ""},
+ {"Cn", Var, 25, ""},
+ {"Co", Var, 0, ""},
+ {"Common", Var, 0, ""},
+ {"Coptic", Var, 0, ""},
+ {"Cs", Var, 0, ""},
+ {"Cuneiform", Var, 0, ""},
+ {"Cypriot", Var, 0, ""},
+ {"Cypro_Minoan", Var, 21, ""},
+ {"Cyrillic", Var, 0, ""},
+ {"Dash", Var, 0, ""},
+ {"Deprecated", Var, 0, ""},
+ {"Deseret", Var, 0, ""},
+ {"Devanagari", Var, 0, ""},
+ {"Diacritic", Var, 0, ""},
+ {"Digit", Var, 0, ""},
+ {"Dives_Akuru", Var, 16, ""},
+ {"Dogra", Var, 13, ""},
+ {"Duployan", Var, 4, ""},
+ {"Egyptian_Hieroglyphs", Var, 0, ""},
+ {"Elbasan", Var, 4, ""},
+ {"Elymaic", Var, 14, ""},
+ {"Ethiopic", Var, 0, ""},
+ {"Extender", Var, 0, ""},
+ {"FoldCategory", Var, 0, ""},
+ {"FoldScript", Var, 0, ""},
+ {"Georgian", Var, 0, ""},
+ {"Glagolitic", Var, 0, ""},
+ {"Gothic", Var, 0, ""},
+ {"Grantha", Var, 4, ""},
+ {"GraphicRanges", Var, 0, ""},
+ {"Greek", Var, 0, ""},
+ {"Gujarati", Var, 0, ""},
+ {"Gunjala_Gondi", Var, 13, ""},
+ {"Gurmukhi", Var, 0, ""},
+ {"Han", Var, 0, ""},
+ {"Hangul", Var, 0, ""},
+ {"Hanifi_Rohingya", Var, 13, ""},
+ {"Hanunoo", Var, 0, ""},
+ {"Hatran", Var, 5, ""},
+ {"Hebrew", Var, 0, ""},
+ {"Hex_Digit", Var, 0, ""},
+ {"Hiragana", Var, 0, ""},
+ {"Hyphen", Var, 0, ""},
+ {"IDS_Binary_Operator", Var, 0, ""},
+ {"IDS_Trinary_Operator", Var, 0, ""},
+ {"Ideographic", Var, 0, ""},
+ {"Imperial_Aramaic", Var, 0, ""},
+ {"In", Func, 2, "func(r rune, ranges ...*RangeTable) bool"},
+ {"Inherited", Var, 0, ""},
+ {"Inscriptional_Pahlavi", Var, 0, ""},
+ {"Inscriptional_Parthian", Var, 0, ""},
+ {"Is", Func, 0, "func(rangeTab *RangeTable, r rune) bool"},
+ {"IsControl", Func, 0, "func(r rune) bool"},
+ {"IsDigit", Func, 0, "func(r rune) bool"},
+ {"IsGraphic", Func, 0, "func(r rune) bool"},
+ {"IsLetter", Func, 0, "func(r rune) bool"},
+ {"IsLower", Func, 0, "func(r rune) bool"},
+ {"IsMark", Func, 0, "func(r rune) bool"},
+ {"IsNumber", Func, 0, "func(r rune) bool"},
+ {"IsOneOf", Func, 0, "func(ranges []*RangeTable, r rune) bool"},
+ {"IsPrint", Func, 0, "func(r rune) bool"},
+ {"IsPunct", Func, 0, "func(r rune) bool"},
+ {"IsSpace", Func, 0, "func(r rune) bool"},
+ {"IsSymbol", Func, 0, "func(r rune) bool"},
+ {"IsTitle", Func, 0, "func(r rune) bool"},
+ {"IsUpper", Func, 0, "func(r rune) bool"},
+ {"Javanese", Var, 0, ""},
+ {"Join_Control", Var, 0, ""},
+ {"Kaithi", Var, 0, ""},
+ {"Kannada", Var, 0, ""},
+ {"Katakana", Var, 0, ""},
+ {"Kawi", Var, 21, ""},
+ {"Kayah_Li", Var, 0, ""},
+ {"Kharoshthi", Var, 0, ""},
+ {"Khitan_Small_Script", Var, 16, ""},
+ {"Khmer", Var, 0, ""},
+ {"Khojki", Var, 4, ""},
+ {"Khudawadi", Var, 4, ""},
+ {"L", Var, 0, ""},
+ {"LC", Var, 25, ""},
+ {"Lao", Var, 0, ""},
+ {"Latin", Var, 0, ""},
+ {"Lepcha", Var, 0, ""},
+ {"Letter", Var, 0, ""},
+ {"Limbu", Var, 0, ""},
+ {"Linear_A", Var, 4, ""},
+ {"Linear_B", Var, 0, ""},
+ {"Lisu", Var, 0, ""},
+ {"Ll", Var, 0, ""},
+ {"Lm", Var, 0, ""},
+ {"Lo", Var, 0, ""},
+ {"Logical_Order_Exception", Var, 0, ""},
+ {"Lower", Var, 0, ""},
+ {"LowerCase", Const, 0, ""},
+ {"Lt", Var, 0, ""},
+ {"Lu", Var, 0, ""},
+ {"Lycian", Var, 0, ""},
+ {"Lydian", Var, 0, ""},
+ {"M", Var, 0, ""},
+ {"Mahajani", Var, 4, ""},
+ {"Makasar", Var, 13, ""},
+ {"Malayalam", Var, 0, ""},
+ {"Mandaic", Var, 0, ""},
+ {"Manichaean", Var, 4, ""},
+ {"Marchen", Var, 7, ""},
+ {"Mark", Var, 0, ""},
+ {"Masaram_Gondi", Var, 10, ""},
+ {"MaxASCII", Const, 0, ""},
+ {"MaxCase", Const, 0, ""},
+ {"MaxLatin1", Const, 0, ""},
+ {"MaxRune", Const, 0, ""},
+ {"Mc", Var, 0, ""},
+ {"Me", Var, 0, ""},
+ {"Medefaidrin", Var, 13, ""},
+ {"Meetei_Mayek", Var, 0, ""},
+ {"Mende_Kikakui", Var, 4, ""},
+ {"Meroitic_Cursive", Var, 1, ""},
+ {"Meroitic_Hieroglyphs", Var, 1, ""},
+ {"Miao", Var, 1, ""},
+ {"Mn", Var, 0, ""},
+ {"Modi", Var, 4, ""},
+ {"Mongolian", Var, 0, ""},
+ {"Mro", Var, 4, ""},
+ {"Multani", Var, 5, ""},
+ {"Myanmar", Var, 0, ""},
+ {"N", Var, 0, ""},
+ {"Nabataean", Var, 4, ""},
+ {"Nag_Mundari", Var, 21, ""},
+ {"Nandinagari", Var, 14, ""},
+ {"Nd", Var, 0, ""},
+ {"New_Tai_Lue", Var, 0, ""},
+ {"Newa", Var, 7, ""},
+ {"Nko", Var, 0, ""},
+ {"Nl", Var, 0, ""},
+ {"No", Var, 0, ""},
+ {"Noncharacter_Code_Point", Var, 0, ""},
+ {"Number", Var, 0, ""},
+ {"Nushu", Var, 10, ""},
+ {"Nyiakeng_Puachue_Hmong", Var, 14, ""},
+ {"Ogham", Var, 0, ""},
+ {"Ol_Chiki", Var, 0, ""},
+ {"Old_Hungarian", Var, 5, ""},
+ {"Old_Italic", Var, 0, ""},
+ {"Old_North_Arabian", Var, 4, ""},
+ {"Old_Permic", Var, 4, ""},
+ {"Old_Persian", Var, 0, ""},
+ {"Old_Sogdian", Var, 13, ""},
+ {"Old_South_Arabian", Var, 0, ""},
+ {"Old_Turkic", Var, 0, ""},
+ {"Old_Uyghur", Var, 21, ""},
+ {"Oriya", Var, 0, ""},
+ {"Osage", Var, 7, ""},
+ {"Osmanya", Var, 0, ""},
+ {"Other", Var, 0, ""},
+ {"Other_Alphabetic", Var, 0, ""},
+ {"Other_Default_Ignorable_Code_Point", Var, 0, ""},
+ {"Other_Grapheme_Extend", Var, 0, ""},
+ {"Other_ID_Continue", Var, 0, ""},
+ {"Other_ID_Start", Var, 0, ""},
+ {"Other_Lowercase", Var, 0, ""},
+ {"Other_Math", Var, 0, ""},
+ {"Other_Uppercase", Var, 0, ""},
+ {"P", Var, 0, ""},
+ {"Pahawh_Hmong", Var, 4, ""},
+ {"Palmyrene", Var, 4, ""},
+ {"Pattern_Syntax", Var, 0, ""},
+ {"Pattern_White_Space", Var, 0, ""},
+ {"Pau_Cin_Hau", Var, 4, ""},
+ {"Pc", Var, 0, ""},
+ {"Pd", Var, 0, ""},
+ {"Pe", Var, 0, ""},
+ {"Pf", Var, 0, ""},
+ {"Phags_Pa", Var, 0, ""},
+ {"Phoenician", Var, 0, ""},
+ {"Pi", Var, 0, ""},
+ {"Po", Var, 0, ""},
+ {"Prepended_Concatenation_Mark", Var, 7, ""},
+ {"PrintRanges", Var, 0, ""},
+ {"Properties", Var, 0, ""},
+ {"Ps", Var, 0, ""},
+ {"Psalter_Pahlavi", Var, 4, ""},
+ {"Punct", Var, 0, ""},
+ {"Quotation_Mark", Var, 0, ""},
+ {"Radical", Var, 0, ""},
+ {"Range16", Type, 0, ""},
+ {"Range16.Hi", Field, 0, ""},
+ {"Range16.Lo", Field, 0, ""},
+ {"Range16.Stride", Field, 0, ""},
+ {"Range32", Type, 0, ""},
+ {"Range32.Hi", Field, 0, ""},
+ {"Range32.Lo", Field, 0, ""},
+ {"Range32.Stride", Field, 0, ""},
+ {"RangeTable", Type, 0, ""},
+ {"RangeTable.LatinOffset", Field, 1, ""},
+ {"RangeTable.R16", Field, 0, ""},
+ {"RangeTable.R32", Field, 0, ""},
+ {"Regional_Indicator", Var, 10, ""},
+ {"Rejang", Var, 0, ""},
+ {"ReplacementChar", Const, 0, ""},
+ {"Runic", Var, 0, ""},
+ {"S", Var, 0, ""},
+ {"STerm", Var, 0, ""},
+ {"Samaritan", Var, 0, ""},
+ {"Saurashtra", Var, 0, ""},
+ {"Sc", Var, 0, ""},
+ {"Scripts", Var, 0, ""},
+ {"Sentence_Terminal", Var, 7, ""},
+ {"Sharada", Var, 1, ""},
+ {"Shavian", Var, 0, ""},
+ {"Siddham", Var, 4, ""},
+ {"SignWriting", Var, 5, ""},
+ {"SimpleFold", Func, 0, "func(r rune) rune"},
+ {"Sinhala", Var, 0, ""},
+ {"Sk", Var, 0, ""},
+ {"Sm", Var, 0, ""},
+ {"So", Var, 0, ""},
+ {"Soft_Dotted", Var, 0, ""},
+ {"Sogdian", Var, 13, ""},
+ {"Sora_Sompeng", Var, 1, ""},
+ {"Soyombo", Var, 10, ""},
+ {"Space", Var, 0, ""},
+ {"SpecialCase", Type, 0, ""},
+ {"Sundanese", Var, 0, ""},
+ {"Syloti_Nagri", Var, 0, ""},
+ {"Symbol", Var, 0, ""},
+ {"Syriac", Var, 0, ""},
+ {"Tagalog", Var, 0, ""},
+ {"Tagbanwa", Var, 0, ""},
+ {"Tai_Le", Var, 0, ""},
+ {"Tai_Tham", Var, 0, ""},
+ {"Tai_Viet", Var, 0, ""},
+ {"Takri", Var, 1, ""},
+ {"Tamil", Var, 0, ""},
+ {"Tangsa", Var, 21, ""},
+ {"Tangut", Var, 7, ""},
+ {"Telugu", Var, 0, ""},
+ {"Terminal_Punctuation", Var, 0, ""},
+ {"Thaana", Var, 0, ""},
+ {"Thai", Var, 0, ""},
+ {"Tibetan", Var, 0, ""},
+ {"Tifinagh", Var, 0, ""},
+ {"Tirhuta", Var, 4, ""},
+ {"Title", Var, 0, ""},
+ {"TitleCase", Const, 0, ""},
+ {"To", Func, 0, "func(_case int, r rune) rune"},
+ {"ToLower", Func, 0, "func(r rune) rune"},
+ {"ToTitle", Func, 0, "func(r rune) rune"},
+ {"ToUpper", Func, 0, "func(r rune) rune"},
+ {"Toto", Var, 21, ""},
+ {"TurkishCase", Var, 0, ""},
+ {"Ugaritic", Var, 0, ""},
+ {"Unified_Ideograph", Var, 0, ""},
+ {"Upper", Var, 0, ""},
+ {"UpperCase", Const, 0, ""},
+ {"UpperLower", Const, 0, ""},
+ {"Vai", Var, 0, ""},
+ {"Variation_Selector", Var, 0, ""},
+ {"Version", Const, 0, ""},
+ {"Vithkuqi", Var, 21, ""},
+ {"Wancho", Var, 14, ""},
+ {"Warang_Citi", Var, 4, ""},
+ {"White_Space", Var, 0, ""},
+ {"Yezidi", Var, 16, ""},
+ {"Yi", Var, 0, ""},
+ {"Z", Var, 0, ""},
+ {"Zanabazar_Square", Var, 10, ""},
+ {"Zl", Var, 0, ""},
+ {"Zp", Var, 0, ""},
+ {"Zs", Var, 0, ""},
+ },
+ "unicode/utf16": {
+ {"AppendRune", Func, 20, "func(a []uint16, r rune) []uint16"},
+ {"Decode", Func, 0, "func(s []uint16) []rune"},
+ {"DecodeRune", Func, 0, "func(r1 rune, r2 rune) rune"},
+ {"Encode", Func, 0, "func(s []rune) []uint16"},
+ {"EncodeRune", Func, 0, "func(r rune) (r1 rune, r2 rune)"},
+ {"IsSurrogate", Func, 0, "func(r rune) bool"},
+ {"RuneLen", Func, 23, "func(r rune) int"},
+ },
+ "unicode/utf8": {
+ {"AppendRune", Func, 18, "func(p []byte, r rune) []byte"},
+ {"DecodeLastRune", Func, 0, "func(p []byte) (r rune, size int)"},
+ {"DecodeLastRuneInString", Func, 0, "func(s string) (r rune, size int)"},
+ {"DecodeRune", Func, 0, "func(p []byte) (r rune, size int)"},
+ {"DecodeRuneInString", Func, 0, "func(s string) (r rune, size int)"},
+ {"EncodeRune", Func, 0, "func(p []byte, r rune) int"},
+ {"FullRune", Func, 0, "func(p []byte) bool"},
+ {"FullRuneInString", Func, 0, "func(s string) bool"},
+ {"MaxRune", Const, 0, ""},
+ {"RuneCount", Func, 0, "func(p []byte) int"},
+ {"RuneCountInString", Func, 0, "func(s string) (n int)"},
+ {"RuneError", Const, 0, ""},
+ {"RuneLen", Func, 0, "func(r rune) int"},
+ {"RuneSelf", Const, 0, ""},
+ {"RuneStart", Func, 0, "func(b byte) bool"},
+ {"UTFMax", Const, 0, ""},
+ {"Valid", Func, 0, "func(p []byte) bool"},
+ {"ValidRune", Func, 1, "func(r rune) bool"},
+ {"ValidString", Func, 0, "func(s string) bool"},
+ },
+ "unique": {
+ {"(Handle).Value", Method, 23, ""},
+ {"Handle", Type, 23, ""},
+ {"Make", Func, 23, "func[T comparable](value T) Handle[T]"},
+ },
+ "unsafe": {
+ {"Add", Func, 0, ""},
+ {"Alignof", Func, 0, ""},
+ {"Offsetof", Func, 0, ""},
+ {"Pointer", Type, 0, ""},
+ {"Sizeof", Func, 0, ""},
+ {"Slice", Func, 0, ""},
+ {"SliceData", Func, 0, ""},
+ {"String", Func, 0, ""},
+ {"StringData", Func, 0, ""},
+ },
+ "weak": {
+ {"(Pointer).Value", Method, 24, ""},
+ {"Make", Func, 24, "func[T any](ptr *T) Pointer[T]"},
+ {"Pointer", Type, 24, ""},
+ },
+}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
new file mode 100644
index 0000000000..e223e0f340
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
@@ -0,0 +1,105 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run generate.go
+
+// Package stdlib provides a table of all exported symbols in the
+// standard library, along with the version at which they first
+// appeared. It also provides the import graph of std packages.
+package stdlib
+
+import (
+ "fmt"
+ "strings"
+)
+
+type Symbol struct {
+ Name string
+ Kind Kind
+ Version Version // Go version that first included the symbol
+ // Signature provides the type of a function (defined only for Kind=Func).
+ // Imported types are denoted as pkg.T; pkg is not fully qualified.
+ // TODO(adonovan): use an unambiguous encoding that is parseable.
+ //
+ // Example2:
+ // func[M ~map[K]V, K comparable, V any](m M) M
+ // func(fi fs.FileInfo, link string) (*Header, error)
+ Signature string // if Kind == stdlib.Func
+}
+
+// A Kind indicates the kind of a symbol:
+// function, variable, constant, type, and so on.
+type Kind int8
+
+const (
+ Invalid Kind = iota // Example name:
+ Type // "Buffer"
+ Func // "Println"
+ Var // "EOF"
+ Const // "Pi"
+ Field // "Point.X"
+ Method // "(*Buffer).Grow"
+)
+
+func (kind Kind) String() string {
+ return [...]string{
+ Invalid: "invalid",
+ Type: "type",
+ Func: "func",
+ Var: "var",
+ Const: "const",
+ Field: "field",
+ Method: "method",
+ }[kind]
+}
+
+// A Version represents a version of Go of the form "go1.%d".
+type Version int8
+
+// String returns a version string of the form "go1.23", without allocating.
+func (v Version) String() string { return versions[v] }
+
+var versions [30]string // (increase constant as needed)
+
+func init() {
+ for i := range versions {
+ versions[i] = fmt.Sprintf("go1.%d", i)
+ }
+}
+
+// HasPackage reports whether the specified package path is part of
+// the standard library's public API.
+func HasPackage(path string) bool {
+ _, ok := PackageSymbols[path]
+ return ok
+}
+
+// SplitField splits the field symbol name into type and field
+// components. It must be called only on Field symbols.
+//
+// Example: "File.Package" -> ("File", "Package")
+func (sym *Symbol) SplitField() (typename, name string) {
+ if sym.Kind != Field {
+ panic("not a field")
+ }
+ typename, name, _ = strings.Cut(sym.Name, ".")
+ return
+}
+
+// SplitMethod splits the method symbol name into pointer, receiver,
+// and method components. It must be called only on Method symbols.
+//
+// Example: "(*Buffer).Grow" -> (true, "Buffer", "Grow")
+func (sym *Symbol) SplitMethod() (ptr bool, recv, name string) {
+ if sym.Kind != Method {
+ panic("not a method")
+ }
+ recv, name, _ = strings.Cut(sym.Name, ".")
+ recv = recv[len("(") : len(recv)-len(")")]
+ ptr = recv[0] == '*'
+ if ptr {
+ recv = recv[len("*"):]
+ }
+ return
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
new file mode 100644
index 0000000000..cdae2b8e81
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -0,0 +1,68 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeparams contains common utilities for writing tools that
+// interact with generic Go code, as introduced with Go 1.18. It
+// supplements the standard library APIs. Notably, the StructuralTerms
+// API computes a minimal representation of the structural
+// restrictions on a type parameter.
+//
+// An external version of these APIs is available in the
+// golang.org/x/exp/typeparams module.
+package typeparams
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// UnpackIndexExpr extracts data from AST nodes that represent index
+// expressions.
+//
+// For an ast.IndexExpr, the resulting indices slice will contain exactly one
+// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
+// number of index expressions.
+//
+// For nodes that don't represent index expressions, the first return value of
+// UnpackIndexExpr will be nil.
+func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
+ switch e := n.(type) {
+ case *ast.IndexExpr:
+ return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
+ case *ast.IndexListExpr:
+ return e.X, e.Lbrack, e.Indices, e.Rbrack
+ }
+ return nil, token.NoPos, nil, token.NoPos
+}
+
+// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
+// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
+// will panic.
+func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
+ switch len(indices) {
+ case 0:
+ panic("empty indices")
+ case 1:
+ return &ast.IndexExpr{
+ X: x,
+ Lbrack: lbrack,
+ Index: indices[0],
+ Rbrack: rbrack,
+ }
+ default:
+ return &ast.IndexListExpr{
+ X: x,
+ Lbrack: lbrack,
+ Indices: indices,
+ Rbrack: rbrack,
+ }
+ }
+}
+
+// IsTypeParam reports whether t is a type parameter (or an alias of one).
+func IsTypeParam(t types.Type) bool {
+ _, ok := types.Unalias(t).(*types.TypeParam)
+ return ok
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
new file mode 100644
index 0000000000..27a2b17929
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
@@ -0,0 +1,155 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "fmt"
+ "go/types"
+)
+
+// CoreType returns the core type of T or nil if T does not have a core type.
+//
+// See https://go.dev/ref/spec#Core_types for the definition of a core type.
+func CoreType(T types.Type) types.Type {
+ U := T.Underlying()
+ if _, ok := U.(*types.Interface); !ok {
+ return U // for non-interface types,
+ }
+
+ terms, err := NormalTerms(U)
+ if len(terms) == 0 || err != nil {
+ // len(terms) -> empty type set of interface.
+ // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
+ return nil // no core type.
+ }
+
+ U = terms[0].Type().Underlying()
+ var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
+ for identical = 1; identical < len(terms); identical++ {
+ if !types.Identical(U, terms[identical].Type().Underlying()) {
+ break
+ }
+ }
+
+ if identical == len(terms) {
+ // https://go.dev/ref/spec#Core_types
+ // "There is a single type U which is the underlying type of all types in the type set of T"
+ return U
+ }
+ ch, ok := U.(*types.Chan)
+ if !ok {
+ return nil // no core type as identical < len(terms) and U is not a channel.
+ }
+ // https://go.dev/ref/spec#Core_types
+ // "the type chan E if T contains only bidirectional channels, or the type chan<- E or
+ // <-chan E depending on the direction of the directional channels present."
+ for chans := identical; chans < len(terms); chans++ {
+ curr, ok := terms[chans].Type().Underlying().(*types.Chan)
+ if !ok {
+ return nil
+ }
+ if !types.Identical(ch.Elem(), curr.Elem()) {
+ return nil // channel elements are not identical.
+ }
+ if ch.Dir() == types.SendRecv {
+ // ch is bidirectional. We can safely always use curr's direction.
+ ch = curr
+ } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
+ // ch and curr are not bidirectional and not the same direction.
+ return nil
+ }
+ }
+ return ch
+}
+
+// NormalTerms returns a slice of terms representing the normalized structural
+// type restrictions of a type, if any.
+//
+// For all types other than *types.TypeParam, *types.Interface, and
+// *types.Union, this is just a single term with Tilde() == false and
+// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
+// below.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration type
+// T[P interface{~int; m()}] int the structural restriction of the type
+// parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// NormalTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, NormalTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the type is
+// invalid, exceeds complexity bounds, or has an empty type set. In the latter
+// case, NormalTerms returns ErrEmptyTypeSet.
+//
+// NormalTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func NormalTerms(T types.Type) ([]*types.Term, error) {
+ // typeSetOf(T) == typeSetOf(Unalias(T))
+ typ := types.Unalias(T)
+ if named, ok := typ.(*types.Named); ok {
+ typ = named.Underlying()
+ }
+ switch typ := typ.(type) {
+ case *types.TypeParam:
+ return StructuralTerms(typ)
+ case *types.Union:
+ return UnionTermSet(typ)
+ case *types.Interface:
+ return InterfaceTermSet(typ)
+ default:
+ return []*types.Term{types.NewTerm(false, T)}, nil
+ }
+}
+
+// Deref returns the type of the variable pointed to by t,
+// if t's core type is a pointer; otherwise it returns t.
+//
+// Do not assume that Deref(T)==T implies T is not a pointer:
+// consider "type T *T", for example.
+//
+// TODO(adonovan): ideally this would live in typesinternal, but that
+// creates an import cycle. Move there when we melt this package down.
+func Deref(t types.Type) types.Type {
+ if ptr, ok := CoreType(t).(*types.Pointer); ok {
+ return ptr.Elem()
+ }
+ return t
+}
+
+// MustDeref returns the type of the variable pointed to by t.
+// It panics if t's core type is not a pointer.
+//
+// TODO(adonovan): ideally this would live in typesinternal, but that
+// creates an import cycle. Move there when we melt this package down.
+func MustDeref(t types.Type) types.Type {
+ if ptr, ok := CoreType(t).(*types.Pointer); ok {
+ return ptr.Elem()
+ }
+ panic(fmt.Sprintf("%v is not a pointer", t))
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go
new file mode 100644
index 0000000000..709d2fc144
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/free.go
@@ -0,0 +1,131 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/internal/aliases"
+)
+
+// Free is a memoization of the set of free type parameters within a
+// type. It makes a sequence of calls to [Free.Has] for overlapping
+// types more efficient. The zero value is ready for use.
+//
+// NOTE: Adapted from go/types/infer.go. If it is later exported, factor.
+type Free struct {
+ seen map[types.Type]bool
+}
+
+// Has reports whether the specified type has a free type parameter.
+func (w *Free) Has(typ types.Type) (res bool) {
+ // detect cycles
+ if x, ok := w.seen[typ]; ok {
+ return x
+ }
+ if w.seen == nil {
+ w.seen = make(map[types.Type]bool)
+ }
+ w.seen[typ] = false
+ defer func() {
+ w.seen[typ] = res
+ }()
+
+ switch t := typ.(type) {
+ case nil, *types.Basic: // TODO(gri) should nil be handled here?
+ break
+
+ case *types.Alias:
+ if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() {
+ return true // This is an uninstantiated Alias.
+ }
+ // The expansion of an alias can have free type parameters,
+ // whether or not the alias itself has type parameters:
+ //
+ // func _[K comparable]() {
+ // type Set = map[K]bool // free(Set) = {K}
+ // type MapTo[V] = map[K]V // free(Map[foo]) = {V}
+ // }
+ //
+ // So, we must Unalias.
+ return w.Has(types.Unalias(t))
+
+ case *types.Array:
+ return w.Has(t.Elem())
+
+ case *types.Slice:
+ return w.Has(t.Elem())
+
+ case *types.Struct:
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ if w.Has(t.Field(i).Type()) {
+ return true
+ }
+ }
+
+ case *types.Pointer:
+ return w.Has(t.Elem())
+
+ case *types.Tuple:
+ n := t.Len()
+ for i := range n {
+ if w.Has(t.At(i).Type()) {
+ return true
+ }
+ }
+
+ case *types.Signature:
+ // t.tparams may not be nil if we are looking at a signature
+ // of a generic function type (or an interface method) that is
+ // part of the type we're testing. We don't care about these type
+ // parameters.
+ // Similarly, the receiver of a method may declare (rather than
+ // use) type parameters, we don't care about those either.
+ // Thus, we only need to look at the input and result parameters.
+ return w.Has(t.Params()) || w.Has(t.Results())
+
+ case *types.Interface:
+ for i, n := 0, t.NumMethods(); i < n; i++ {
+ if w.Has(t.Method(i).Type()) {
+ return true
+ }
+ }
+ terms, err := InterfaceTermSet(t)
+ if err != nil {
+ return false // ill typed
+ }
+ for _, term := range terms {
+ if w.Has(term.Type()) {
+ return true
+ }
+ }
+
+ case *types.Map:
+ return w.Has(t.Key()) || w.Has(t.Elem())
+
+ case *types.Chan:
+ return w.Has(t.Elem())
+
+ case *types.Named:
+ args := t.TypeArgs()
+ if params := t.TypeParams(); params.Len() > args.Len() {
+ return true // this is an uninstantiated named type.
+ }
+ for i, n := 0, args.Len(); i < n; i++ {
+ if w.Has(args.At(i)) {
+ return true
+ }
+ }
+ return w.Has(t.Underlying()) // recurse for types local to parameterized functions
+
+ case *types.TypeParam:
+ return true
+
+ default:
+ panic(t) // unreachable
+ }
+
+ return false
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
new file mode 100644
index 0000000000..f49802b8ef
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
@@ -0,0 +1,218 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "errors"
+ "fmt"
+ "go/types"
+ "os"
+ "strings"
+)
+
+//go:generate go run copytermlist.go
+
+const debug = false
+
+var ErrEmptyTypeSet = errors.New("empty type set")
+
+// StructuralTerms returns a slice of terms representing the normalized
+// structural type restrictions of a type parameter, if any.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration
+//
+// type T[P interface{~int; m()}] int
+//
+// the structural restriction of the type parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// StructuralTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, StructuralTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the
+// constraint interface is invalid, exceeds complexity bounds, or has an empty
+// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
+//
+// StructuralTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) {
+ constraint := tparam.Constraint()
+ if constraint == nil {
+ return nil, fmt.Errorf("%s has nil constraint", tparam)
+ }
+ iface, _ := constraint.Underlying().(*types.Interface)
+ if iface == nil {
+ return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
+ }
+ return InterfaceTermSet(iface)
+}
+
+// InterfaceTermSet computes the normalized terms for a constraint interface,
+// returning an error if the term set cannot be computed or is empty. In the
+// latter case, the error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) {
+ return computeTermSet(iface)
+}
+
+// UnionTermSet computes the normalized terms for a union, returning an error
+// if the term set cannot be computed or is empty. In the latter case, the
+// error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func UnionTermSet(union *types.Union) ([]*types.Term, error) {
+ return computeTermSet(union)
+}
+
+func computeTermSet(typ types.Type) ([]*types.Term, error) {
+ tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
+ if err != nil {
+ return nil, err
+ }
+ if tset.terms.isEmpty() {
+ return nil, ErrEmptyTypeSet
+ }
+ if tset.terms.isAll() {
+ return nil, nil
+ }
+ var terms []*types.Term
+ for _, term := range tset.terms {
+ terms = append(terms, types.NewTerm(term.tilde, term.typ))
+ }
+ return terms, nil
+}
+
+// A termSet holds the normalized set of terms for a given type.
+//
+// The name termSet is intentionally distinct from 'type set': a type set is
+// all types that implement a type (and includes method restrictions), whereas
+// a term set just represents the structural restrictions on a type.
+type termSet struct {
+ complete bool
+ terms termlist
+}
+
+func indentf(depth int, format string, args ...any) {
+ fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
+}
+
+func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
+ if t == nil {
+ panic("nil type")
+ }
+
+ if debug {
+ indentf(depth, "%s", t.String())
+ defer func() {
+ if err != nil {
+ indentf(depth, "=> %s", err)
+ } else {
+ indentf(depth, "=> %s", res.terms.String())
+ }
+ }()
+ }
+
+ const maxTermCount = 100
+ if tset, ok := seen[t]; ok {
+ if !tset.complete {
+ return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
+ }
+ return tset, nil
+ }
+
+ // Mark the current type as seen to avoid infinite recursion.
+ tset := new(termSet)
+ defer func() {
+ tset.complete = true
+ }()
+ seen[t] = tset
+
+ switch u := t.Underlying().(type) {
+ case *types.Interface:
+ // The term set of an interface is the intersection of the term sets of its
+ // embedded types.
+ tset.terms = allTermlist
+ for i := 0; i < u.NumEmbeddeds(); i++ {
+ embedded := u.EmbeddedType(i)
+ if _, ok := embedded.Underlying().(*types.TypeParam); ok {
+ return nil, fmt.Errorf("invalid embedded type %T", embedded)
+ }
+ tset2, err := computeTermSetInternal(embedded, seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ tset.terms = tset.terms.intersect(tset2.terms)
+ }
+ case *types.Union:
+ // The term set of a union is the union of term sets of its terms.
+ tset.terms = nil
+ for i := 0; i < u.Len(); i++ {
+ t := u.Term(i)
+ var terms termlist
+ switch t.Type().Underlying().(type) {
+ case *types.Interface:
+ tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ terms = tset2.terms
+ case *types.TypeParam, *types.Union:
+ // A stand-alone type parameter or union is not permitted as union
+ // term.
+ return nil, fmt.Errorf("invalid union term %T", t)
+ default:
+ if t.Type() == types.Typ[types.Invalid] {
+ continue
+ }
+ terms = termlist{{t.Tilde(), t.Type()}}
+ }
+ tset.terms = tset.terms.union(terms)
+ if len(tset.terms) > maxTermCount {
+ return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+ }
+ }
+ case *types.TypeParam:
+ panic("unreachable")
+ default:
+ // For all other types, the term set is just a single non-tilde term
+ // holding the type itself.
+ if u != types.Typ[types.Invalid] {
+ tset.terms = termlist{{false, t}}
+ }
+ }
+ return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+ return t.Underlying()
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
new file mode 100644
index 0000000000..9bc29143f6
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -0,0 +1,169 @@
+// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
+// Source: ../../cmd/compile/internal/types2/termlist.go
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+ "go/types"
+ "strings"
+)
+
+// A termlist represents the type set represented by the union
+// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// termSep is the separator used between individual terms.
+const termSep = " | "
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+ if len(xl) == 0 {
+ return "∅"
+ }
+ var buf strings.Builder
+ for i, x := range xl {
+ if i > 0 {
+ buf.WriteString(termSep)
+ }
+ buf.WriteString(x.String())
+ }
+ return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+ // If there's a non-nil term, the entire list is not empty.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+ // If there's a 𝓤 term, the entire list is 𝓤.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil && x.typ == nil {
+ return true
+ }
+ }
+ return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ used := make([]bool, len(xl))
+ var rl termlist
+ for i, xi := range xl {
+ if xi == nil || used[i] {
+ continue
+ }
+ for j := i + 1; j < len(xl); j++ {
+ xj := xl[j]
+ if xj == nil || used[j] {
+ continue
+ }
+ if u1, u2 := xi.union(xj); u2 == nil {
+ // If we encounter a 𝓤 term, the entire list is 𝓤.
+ // Exit early.
+ // (Note that this is not just an optimization;
+ // if we continue, we may end up with a 𝓤 term
+ // and other terms and the result would not be
+ // in normal form.)
+ if u1.typ == nil {
+ return allTermlist
+ }
+ xi = u1
+ used[j] = true // xj is now unioned into xi - ignore it in future iterations
+ }
+ }
+ rl = append(rl, xi)
+ }
+ return rl
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+ return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+ if xl.isEmpty() || yl.isEmpty() {
+ return nil
+ }
+
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var rl termlist
+ for _, x := range xl {
+ for _, y := range yl {
+ if r := x.intersect(y); r != nil {
+ rl = append(rl, r)
+ }
+ }
+ }
+ return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+ // TODO(gri) this should be more efficient
+ return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+ for _, x := range xl {
+ if x.includes(t) {
+ return true
+ }
+ }
+ return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+ for _, x := range xl {
+ if y.subsetOf(x) {
+ return true
+ }
+ }
+ return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+ if yl.isEmpty() {
+ return xl.isEmpty()
+ }
+
+ // each term x of xl must be a subset of yl
+ for _, x := range xl {
+ if !yl.supersetOf(x) {
+ return false // x is not a subset yl
+ }
+ }
+ return true
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
new file mode 100644
index 0000000000..fa758cdc98
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -0,0 +1,172 @@
+// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
+// Source: ../../cmd/compile/internal/types2/typeterm.go
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+// ∅: (*term)(nil) == ∅ // set of no types (empty set)
+// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+type term struct {
+ tilde bool // valid if typ != nil
+ typ types.Type
+}
+
+func (x *term) String() string {
+ switch {
+ case x == nil:
+ return "∅"
+ case x.typ == nil:
+ return "𝓤"
+ case x.tilde:
+ return "~" + x.typ.String()
+ default:
+ return x.typ.String()
+ }
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return x == y
+ case x.typ == nil || y.typ == nil:
+ return x.typ == y.typ
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+ // easy cases
+ switch {
+ case x == nil && y == nil:
+ return nil, nil // ∅ ∪ ∅ == ∅
+ case x == nil:
+ return y, nil // ∅ ∪ y == y
+ case y == nil:
+ return x, nil // x ∪ ∅ == x
+ case x.typ == nil:
+ return x, nil // 𝓤 ∪ y == 𝓤
+ case y.typ == nil:
+ return y, nil // x ∪ 𝓤 == 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∪ ~t == ~t
+ // ~t ∪ T == ~t
+ // T ∪ ~t == ~t
+ // T ∪ T == T
+ if x.tilde || !y.tilde {
+ return x, nil
+ }
+ return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅
+ case x.typ == nil:
+ return y // 𝓤 ∩ y == y
+ case y.typ == nil:
+ return x // x ∩ 𝓤 == x
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return nil // x ∩ y == ∅ if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∩ ~t == ~t
+ // ~t ∩ T == T
+ // T ∩ ~t == T
+ // T ∩ T == T
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return false // t ∈ ∅ == false
+ case x.typ == nil:
+ return true // t ∈ 𝓤 == true
+ }
+ // ∅ ⊂ x ⊂ 𝓤
+
+ u := t
+ if x.tilde {
+ u = under(u)
+ }
+ return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return true // ∅ ⊆ y == true
+ case y == nil:
+ return false // x ⊆ ∅ == false since x != ∅
+ case y.typ == nil:
+ return true // x ⊆ 𝓤 == true
+ case x.typ == nil:
+ return false // 𝓤 ⊆ y == false since y != 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return false // x ⊆ y == false if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ⊆ ~t == true
+ // ~t ⊆ T == false
+ // T ⊆ ~t == true
+ // T ⊆ T == true
+ return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+ if debug && (x.typ == nil || y.typ == nil) {
+ panic("invalid argument(s)")
+ }
+ ux := x.typ
+ if y.tilde {
+ ux = under(ux)
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = under(uy)
+ }
+ return !types.Identical(ux, uy)
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
new file mode 100644
index 0000000000..3db2a135b9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
@@ -0,0 +1,137 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "fmt"
+ "go/ast"
+ "go/types"
+ _ "unsafe"
+)
+
+// CallKind describes the function position of an [*ast.CallExpr].
+type CallKind int
+
+const (
+ CallStatic CallKind = iota // static call to known function
+ CallInterface // dynamic call through an interface method
+ CallDynamic // dynamic call of a func value
+ CallBuiltin // call to a builtin function
+ CallConversion // a conversion (not a call)
+)
+
+var callKindNames = []string{
+ "CallStatic",
+ "CallInterface",
+ "CallDynamic",
+ "CallBuiltin",
+ "CallConversion",
+}
+
+func (k CallKind) String() string {
+ if i := int(k); i >= 0 && i < len(callKindNames) {
+ return callKindNames[i]
+ }
+ return fmt.Sprintf("typeutil.CallKind(%d)", k)
+}
+
+// ClassifyCall classifies the function position of a call expression ([*ast.CallExpr]).
+// It distinguishes among true function calls, calls to builtins, and type conversions,
+// and further classifies function calls as static calls (where the function is known),
+// dynamic interface calls, and other dynamic calls.
+//
+// For the declarations:
+//
+// func f() {}
+// func g[T any]() {}
+// var v func()
+// var s []func()
+// type I interface { M() }
+// var i I
+//
+// ClassifyCall returns the following:
+//
+// f() CallStatic
+// g[int]() CallStatic
+// i.M() CallInterface
+// min(1, 2) CallBuiltin
+// v() CallDynamic
+// s[0]() CallDynamic
+// int(x) CallConversion
+// []byte("") CallConversion
+func ClassifyCall(info *types.Info, call *ast.CallExpr) CallKind {
+ if info.Types == nil {
+ panic("ClassifyCall: info.Types is nil")
+ }
+ tv := info.Types[call.Fun]
+ if tv.IsType() {
+ return CallConversion
+ }
+ if tv.IsBuiltin() {
+ return CallBuiltin
+ }
+ obj := info.Uses[UsedIdent(info, call.Fun)]
+ // Classify the call by the type of the object, if any.
+ switch obj := obj.(type) {
+ case *types.Func:
+ if interfaceMethod(obj) {
+ return CallInterface
+ }
+ return CallStatic
+ default:
+ return CallDynamic
+ }
+}
+
+// UsedIdent returns the identifier such that info.Uses[UsedIdent(info, e)]
+// is the [types.Object] used by e, if any.
+//
+// If e is one of various forms of reference:
+//
+// f, c, v, T lexical reference
+// pkg.X qualified identifier
+// f[T] or pkg.F[K,V] instantiations of the above kinds
+// expr.f field or method value selector
+// T.f method expression selector
+//
+// UsedIdent returns the identifier whose associated value in [types.Info.Uses]
+// is the object to which it refers.
+//
+// For the declarations:
+//
+// func F[T any] {...}
+// type I interface { M() }
+// var (
+// x int
+// s struct { f int }
+// a []int
+// i I
+// )
+//
+// UsedIdent returns the following:
+//
+// Expr UsedIdent
+// x x
+// s.f f
+// F[int] F
+// i.M M
+// I.M M
+// min min
+// int int
+// 1 nil
+// a[0] nil
+// []byte nil
+//
+// Note: if e is an instantiated function or method, UsedIdent returns
+// the corresponding generic function or method on the generic type.
+func UsedIdent(info *types.Info, e ast.Expr) *ast.Ident {
+ return usedIdent(info, e)
+}
+
+//go:linkname usedIdent golang.org/x/tools/go/types/typeutil.usedIdent
+func usedIdent(info *types.Info, e ast.Expr) *ast.Ident
+
+//go:linkname interfaceMethod golang.org/x/tools/go/types/typeutil.interfaceMethod
+func interfaceMethod(f *types.Func) bool
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go
new file mode 100644
index 0000000000..4957f02164
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go
@@ -0,0 +1,133 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "fmt"
+ "go/types"
+
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+// ForEachElement calls f for type T and each type reachable from its
+// type through reflection. It does this by recursively stripping off
+// type constructors; in addition, for each named type N, the type *N
+// is added to the result as it may have additional methods.
+//
+// The caller must provide an initially empty set used to de-duplicate
+// identical types, potentially across multiple calls to ForEachElement.
+// (Its final value holds all the elements seen, matching the arguments
+// passed to f.)
+//
+// TODO(adonovan): share/harmonize with go/callgraph/rta.
+func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) {
+ var visit func(T types.Type, skip bool)
+ visit = func(T types.Type, skip bool) {
+ if !skip {
+ if seen, _ := rtypes.Set(T, true).(bool); seen {
+ return // de-dup
+ }
+
+ f(T) // notify caller of new element type
+ }
+
+ // Recursion over signatures of each method.
+ tmset := msets.MethodSet(T)
+ for i := 0; i < tmset.Len(); i++ {
+ sig := tmset.At(i).Type().(*types.Signature)
+ // It is tempting to call visit(sig, false)
+ // but, as noted in golang.org/cl/65450043,
+ // the Signature.Recv field is ignored by
+ // types.Identical and typeutil.Map, which
+ // is confusing at best.
+ //
+ // More importantly, the true signature rtype
+ // reachable from a method using reflection
+ // has no receiver but an extra ordinary parameter.
+ // For the Read method of io.Reader we want:
+ // func(Reader, []byte) (int, error)
+ // but here sig is:
+ // func([]byte) (int, error)
+ // with .Recv = Reader (though it is hard to
+ // notice because it doesn't affect Signature.String
+ // or types.Identical).
+ //
+ // TODO(adonovan): construct and visit the correct
+ // non-method signature with an extra parameter
+ // (though since unnamed func types have no methods
+ // there is essentially no actual demand for this).
+ //
+ // TODO(adonovan): document whether or not it is
+ // safe to skip non-exported methods (as RTA does).
+ visit(sig.Params(), true) // skip the Tuple
+ visit(sig.Results(), true) // skip the Tuple
+ }
+
+ switch T := T.(type) {
+ case *types.Alias:
+ visit(types.Unalias(T), skip) // emulates the pre-Alias behavior
+
+ case *types.Basic:
+ // nop
+
+ case *types.Interface:
+ // nop---handled by recursion over method set.
+
+ case *types.Pointer:
+ visit(T.Elem(), false)
+
+ case *types.Slice:
+ visit(T.Elem(), false)
+
+ case *types.Chan:
+ visit(T.Elem(), false)
+
+ case *types.Map:
+ visit(T.Key(), false)
+ visit(T.Elem(), false)
+
+ case *types.Signature:
+ if T.Recv() != nil {
+ panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv()))
+ }
+ visit(T.Params(), true) // skip the Tuple
+ visit(T.Results(), true) // skip the Tuple
+
+ case *types.Named:
+ // A pointer-to-named type can be derived from a named
+ // type via reflection. It may have methods too.
+ visit(types.NewPointer(T), false)
+
+ // Consider 'type T struct{S}' where S has methods.
+ // Reflection provides no way to get from T to struct{S},
+ // only to S, so the method set of struct{S} is unwanted,
+ // so set 'skip' flag during recursion.
+ visit(T.Underlying(), true) // skip the unnamed type
+
+ case *types.Array:
+ visit(T.Elem(), false)
+
+ case *types.Struct:
+ for i, n := 0, T.NumFields(); i < n; i++ {
+ // TODO(adonovan): document whether or not
+ // it is safe to skip non-exported fields.
+ visit(T.Field(i).Type(), false)
+ }
+
+ case *types.Tuple:
+ for i, n := 0, T.Len(); i < n; i++ {
+ visit(T.At(i).Type(), false)
+ }
+
+ case *types.TypeParam, *types.Union:
+ // forEachReachable must not be called on parameterized types.
+ panic(T)
+
+ default:
+ panic(T)
+ }
+ }
+ visit(T, false)
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
new file mode 100644
index 0000000000..235a6defc4
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
@@ -0,0 +1,1560 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+//go:generate stringer -type=ErrorCode
+
+type ErrorCode int
+
+// This file defines the error codes that can be produced during type-checking.
+// Collectively, these codes provide an identifier that may be used to
+// implement special handling for certain types of errors.
+//
+// Error codes should be fine-grained enough that the exact nature of the error
+// can be easily determined, but coarse enough that they are not an
+// implementation detail of the type checking algorithm. As a rule-of-thumb,
+// errors should be considered equivalent if there is a theoretical refactoring
+// of the type checker in which they are emitted in exactly one place. For
+// example, the type checker emits different error messages for "too many
+// arguments" and "too few arguments", but one can imagine an alternative type
+// checker where this check instead just emits a single "wrong number of
+// arguments", so these errors should have the same code.
+//
+// Error code names should be as brief as possible while retaining accuracy and
+// distinctiveness. In most cases names should start with an adjective
+// describing the nature of the error (e.g. "invalid", "unused", "misplaced"),
+// and end with a noun identifying the relevant language object. For example,
+// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the
+// convention that "bad" implies a problem with syntax, and "invalid" implies a
+// problem with types.
+
+const (
+ // InvalidSyntaxTree occurs if an invalid syntax tree is provided
+ // to the type checker. It should never happen.
+ InvalidSyntaxTree ErrorCode = -1
+)
+
+const (
+ _ ErrorCode = iota
+
+ // Test is reserved for errors that only apply while in self-test mode.
+ Test
+
+ /* package names */
+
+ // BlankPkgName occurs when a package name is the blank identifier "_".
+ //
+ // Per the spec:
+ // "The PackageName must not be the blank identifier."
+ BlankPkgName
+
+ // MismatchedPkgName occurs when a file's package name doesn't match the
+ // package name already established by other files.
+ MismatchedPkgName
+
+ // InvalidPkgUse occurs when a package identifier is used outside of a
+ // selector expression.
+ //
+ // Example:
+ // import "fmt"
+ //
+ // var _ = fmt
+ InvalidPkgUse
+
+ /* imports */
+
+ // BadImportPath occurs when an import path is not valid.
+ BadImportPath
+
+ // BrokenImport occurs when importing a package fails.
+ //
+ // Example:
+ // import "amissingpackage"
+ BrokenImport
+
+ // ImportCRenamed occurs when the special import "C" is renamed. "C" is a
+ // pseudo-package, and must not be renamed.
+ //
+ // Example:
+ // import _ "C"
+ ImportCRenamed
+
+ // UnusedImport occurs when an import is unused.
+ //
+ // Example:
+ // import "fmt"
+ //
+ // func main() {}
+ UnusedImport
+
+ /* initialization */
+
+ // InvalidInitCycle occurs when an invalid cycle is detected within the
+ // initialization graph.
+ //
+ // Example:
+ // var x int = f()
+ //
+ // func f() int { return x }
+ InvalidInitCycle
+
+ /* decls */
+
+ // DuplicateDecl occurs when an identifier is declared multiple times.
+ //
+ // Example:
+ // var x = 1
+ // var x = 2
+ DuplicateDecl
+
+ // InvalidDeclCycle occurs when a declaration cycle is not valid.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type T struct {
+ // a [n]int
+ // }
+ //
+ // var n = unsafe.Sizeof(T{})
+ InvalidDeclCycle
+
+ // InvalidTypeCycle occurs when a cycle in type definitions results in a
+ // type that is not well-defined.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type T [unsafe.Sizeof(T{})]int
+ InvalidTypeCycle
+
+ /* decls > const */
+
+ // InvalidConstInit occurs when a const declaration has a non-constant
+ // initializer.
+ //
+ // Example:
+ // var x int
+ // const _ = x
+ InvalidConstInit
+
+ // InvalidConstVal occurs when a const value cannot be converted to its
+ // target type.
+ //
+ // TODO(findleyr): this error code and example are not very clear. Consider
+ // removing it.
+ //
+ // Example:
+ // const _ = 1 << "hello"
+ InvalidConstVal
+
+ // InvalidConstType occurs when the underlying type in a const declaration
+ // is not a valid constant type.
+ //
+ // Example:
+ // const c *int = 4
+ InvalidConstType
+
+ /* decls > var (+ other variable assignment codes) */
+
+ // UntypedNilUse occurs when the predeclared (untyped) value nil is used to
+ // initialize a variable declared without an explicit type.
+ //
+ // Example:
+ // var x = nil
+ UntypedNilUse
+
+ // WrongAssignCount occurs when the number of values on the right-hand side
+ // of an assignment or initialization expression does not match the number
+ // of variables on the left-hand side.
+ //
+ // Example:
+ // var x = 1, 2
+ WrongAssignCount
+
+ // UnassignableOperand occurs when the left-hand side of an assignment is
+ // not assignable.
+ //
+ // Example:
+ // func f() {
+ // const c = 1
+ // c = 2
+ // }
+ UnassignableOperand
+
+ // NoNewVar occurs when a short variable declaration (':=') does not declare
+ // new variables.
+ //
+ // Example:
+ // func f() {
+ // x := 1
+ // x := 2
+ // }
+ NoNewVar
+
+ // MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does
+ // not have single-valued left-hand or right-hand side.
+ //
+ // Per the spec:
+ // "In assignment operations, both the left- and right-hand expression lists
+ // must contain exactly one single-valued expression"
+ //
+ // Example:
+ // func f() int {
+ // x, y := 1, 2
+ // x, y += 1
+ // return x + y
+ // }
+ MultiValAssignOp
+
+ // InvalidIfaceAssign occurs when a value of type T is used as an
+ // interface, but T does not implement a method of the expected interface.
+ //
+ // Example:
+ // type I interface {
+ // f()
+ // }
+ //
+ // type T int
+ //
+ // var x I = T(1)
+ InvalidIfaceAssign
+
+ // InvalidChanAssign occurs when a chan assignment is invalid.
+ //
+ // Per the spec, a value x is assignable to a channel type T if:
+ // "x is a bidirectional channel value, T is a channel type, x's type V and
+ // T have identical element types, and at least one of V or T is not a
+ // defined type."
+ //
+ // Example:
+ // type T1 chan int
+ // type T2 chan int
+ //
+ // var x T1
+ // // Invalid assignment because both types are named
+ // var _ T2 = x
+ InvalidChanAssign
+
+ // IncompatibleAssign occurs when the type of the right-hand side expression
+ // in an assignment cannot be assigned to the type of the variable being
+ // assigned.
+ //
+ // Example:
+ // var x []int
+ // var _ int = x
+ IncompatibleAssign
+
+ // UnaddressableFieldAssign occurs when trying to assign to a struct field
+ // in a map value.
+ //
+ // Example:
+ // func f() {
+ // m := make(map[string]struct{i int})
+ // m["foo"].i = 42
+ // }
+ UnaddressableFieldAssign
+
+ /* decls > type (+ other type expression codes) */
+
+ // NotAType occurs when the identifier used as the underlying type in a type
+ // declaration or the right-hand side of a type alias does not denote a type.
+ //
+ // Example:
+ // var S = 2
+ //
+ // type T S
+ NotAType
+
+ // InvalidArrayLen occurs when an array length is not a constant value.
+ //
+ // Example:
+ // var n = 3
+ // var _ = [n]int{}
+ InvalidArrayLen
+
+ // BlankIfaceMethod occurs when a method name is '_'.
+ //
+ // Per the spec:
+ // "The name of each explicitly specified method must be unique and not
+ // blank."
+ //
+ // Example:
+ // type T interface {
+ // _(int)
+ // }
+ BlankIfaceMethod
+
+ // IncomparableMapKey occurs when a map key type does not support the == and
+ // != operators.
+ //
+ // Per the spec:
+ // "The comparison operators == and != must be fully defined for operands of
+ // the key type; thus the key type must not be a function, map, or slice."
+ //
+ // Example:
+ // var x map[T]int
+ //
+ // type T []int
+ IncomparableMapKey
+
+ // InvalidIfaceEmbed occurs when a non-interface type is embedded in an
+ // interface.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func (T) m()
+ //
+ // type I interface {
+ // T
+ // }
+ InvalidIfaceEmbed
+
+ // InvalidPtrEmbed occurs when an embedded field is of the pointer form *T,
+ // and T itself is itself a pointer, an unsafe.Pointer, or an interface.
+ //
+ // Per the spec:
+ // "An embedded field must be specified as a type name T or as a pointer to
+ // a non-interface type name *T, and T itself may not be a pointer type."
+ //
+ // Example:
+ // type T *int
+ //
+ // type S struct {
+ // *T
+ // }
+ InvalidPtrEmbed
+
+ /* decls > func and method */
+
+ // BadRecv occurs when a method declaration does not have exactly one
+ // receiver parameter.
+ //
+ // Example:
+ // func () _() {}
+ BadRecv
+
+ // InvalidRecv occurs when a receiver type expression is not of the form T
+ // or *T, or T is a pointer type.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func (**T) m() {}
+ InvalidRecv
+
+ // DuplicateFieldAndMethod occurs when an identifier appears as both a field
+ // and method name.
+ //
+ // Example:
+ // type T struct {
+ // m int
+ // }
+ //
+ // func (T) m() {}
+ DuplicateFieldAndMethod
+
+ // DuplicateMethod occurs when two methods on the same receiver type have
+ // the same name.
+ //
+ // Example:
+ // type T struct {}
+ // func (T) m() {}
+ // func (T) m(i int) int { return i }
+ DuplicateMethod
+
+ /* decls > special */
+
+ // InvalidBlank occurs when a blank identifier is used as a value or type.
+ //
+ // Per the spec:
+ // "The blank identifier may appear as an operand only on the left-hand side
+ // of an assignment."
+ //
+ // Example:
+ // var x = _
+ InvalidBlank
+
+ // InvalidIota occurs when the predeclared identifier iota is used outside
+ // of a constant declaration.
+ //
+ // Example:
+ // var x = iota
+ InvalidIota
+
+ // MissingInitBody occurs when an init function is missing its body.
+ //
+ // Example:
+ // func init()
+ MissingInitBody
+
+ // InvalidInitSig occurs when an init function declares parameters or
+ // results.
+ //
+ // Example:
+ // func init() int { return 1 }
+ InvalidInitSig
+
+ // InvalidInitDecl occurs when init is declared as anything other than a
+ // function.
+ //
+ // Example:
+ // var init = 1
+ InvalidInitDecl
+
+ // InvalidMainDecl occurs when main is declared as anything other than a
+ // function, in a main package.
+ InvalidMainDecl
+
+ /* exprs */
+
+ // TooManyValues occurs when a function returns too many values for the
+ // expression context in which it is used.
+ //
+ // Example:
+ // func ReturnTwo() (int, int) {
+ // return 1, 2
+ // }
+ //
+ // var x = ReturnTwo()
+ TooManyValues
+
+ // NotAnExpr occurs when a type expression is used where a value expression
+ // is expected.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func f() {
+ // T
+ // }
+ NotAnExpr
+
+ /* exprs > const */
+
+ // TruncatedFloat occurs when a float constant is truncated to an integer
+ // value.
+ //
+ // Example:
+ // var _ int = 98.6
+ TruncatedFloat
+
+ // NumericOverflow occurs when a numeric constant overflows its target type.
+ //
+ // Example:
+ // var x int8 = 1000
+ NumericOverflow
+
+ /* exprs > operation */
+
+ // UndefinedOp occurs when an operator is not defined for the type(s) used
+ // in an operation.
+ //
+ // Example:
+ // var c = "a" - "b"
+ UndefinedOp
+
+ // MismatchedTypes occurs when operand types are incompatible in a binary
+ // operation.
+ //
+ // Example:
+ // var a = "hello"
+ // var b = 1
+ // var c = a - b
+ MismatchedTypes
+
+ // DivByZero occurs when a division operation is provable at compile
+ // time to be a division by zero.
+ //
+ // Example:
+ // const divisor = 0
+ // var x int = 1/divisor
+ DivByZero
+
+ // NonNumericIncDec occurs when an increment or decrement operator is
+ // applied to a non-numeric value.
+ //
+ // Example:
+ // func f() {
+ // var c = "c"
+ // c++
+ // }
+ NonNumericIncDec
+
+ /* exprs > ptr */
+
+ // UnaddressableOperand occurs when the & operator is applied to an
+ // unaddressable expression.
+ //
+ // Example:
+ // var x = &1
+ UnaddressableOperand
+
+ // InvalidIndirection occurs when a non-pointer value is indirected via the
+ // '*' operator.
+ //
+ // Example:
+ // var x int
+ // var y = *x
+ InvalidIndirection
+
+ /* exprs > [] */
+
+ // NonIndexableOperand occurs when an index operation is applied to a value
+ // that cannot be indexed.
+ //
+ // Example:
+ // var x = 1
+ // var y = x[1]
+ NonIndexableOperand
+
+ // InvalidIndex occurs when an index argument is not of integer type,
+ // negative, or out-of-bounds.
+ //
+ // Example:
+ // var s = [...]int{1,2,3}
+ // var x = s[5]
+ //
+ // Example:
+ // var s = []int{1,2,3}
+ // var _ = s[-1]
+ //
+ // Example:
+ // var s = []int{1,2,3}
+ // var i string
+ // var _ = s[i]
+ InvalidIndex
+
+ // SwappedSliceIndices occurs when constant indices in a slice expression
+ // are decreasing in value.
+ //
+ // Example:
+ // var _ = []int{1,2,3}[2:1]
+ SwappedSliceIndices
+
+ /* operators > slice */
+
+ // NonSliceableOperand occurs when a slice operation is applied to a value
+ // whose type is not sliceable, or is unaddressable.
+ //
+ // Example:
+ // var x = [...]int{1, 2, 3}[:1]
+ //
+ // Example:
+ // var x = 1
+ // var y = 1[:1]
+ NonSliceableOperand
+
+ // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is
+ // applied to a string.
+ //
+ // Example:
+ // var s = "hello"
+ // var x = s[1:2:3]
+ InvalidSliceExpr
+
+ /* exprs > shift */
+
+ // InvalidShiftCount occurs when the right-hand side of a shift operation is
+ // either non-integer, negative, or too large.
+ //
+ // Example:
+ // var (
+ // x string
+ // y int = 1 << x
+ // )
+ InvalidShiftCount
+
+ // InvalidShiftOperand occurs when the shifted operand is not an integer.
+ //
+ // Example:
+ // var s = "hello"
+ // var x = s << 2
+ InvalidShiftOperand
+
+ /* exprs > chan */
+
+ // InvalidReceive occurs when there is a channel receive from a value that
+ // is either not a channel, or is a send-only channel.
+ //
+ // Example:
+ // func f() {
+ // var x = 1
+ // <-x
+ // }
+ InvalidReceive
+
+ // InvalidSend occurs when there is a channel send to a value that is not a
+ // channel, or is a receive-only channel.
+ //
+ // Example:
+ // func f() {
+ // var x = 1
+ // x <- "hello!"
+ // }
+ InvalidSend
+
+ /* exprs > literal */
+
+ // DuplicateLitKey occurs when an index is duplicated in a slice, array, or
+ // map literal.
+ //
+ // Example:
+ // var _ = []int{0:1, 0:2}
+ //
+ // Example:
+ // var _ = map[string]int{"a": 1, "a": 2}
+ DuplicateLitKey
+
+ // MissingLitKey occurs when a map literal is missing a key expression.
+ //
+ // Example:
+ // var _ = map[string]int{1}
+ MissingLitKey
+
+ // InvalidLitIndex occurs when the key in a key-value element of a slice or
+ // array literal is not an integer constant.
+ //
+ // Example:
+ // var i = 0
+ // var x = []string{i: "world"}
+ InvalidLitIndex
+
+ // OversizeArrayLit occurs when an array literal exceeds its length.
+ //
+ // Example:
+ return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+ OversizeArrayLit
+
+ // MixedStructLit occurs when a struct literal contains a mix of positional
+ // and named elements.
+ //
+ // Example:
+ // var _ = struct{i, j int}{i: 1, 2}
+ MixedStructLit
+
+ // InvalidStructLit occurs when a positional struct literal has an incorrect
+ // number of values.
+ //
+ // Example:
+ // var _ = struct{i, j int}{1,2,3}
+ InvalidStructLit
+
+ // MissingLitField occurs when a struct literal refers to a field that does
+ // not exist on the struct type.
+ //
+ // Example:
+ // var _ = struct{i int}{j: 2}
+ MissingLitField
+
+ // DuplicateLitField occurs when a struct literal contains duplicated
+ // fields.
+ //
+ // Example:
+ // var _ = struct{i int}{i: 1, i: 2}
+ DuplicateLitField
+
+ // UnexportedLitField occurs when a positional struct literal implicitly
+ // assigns an unexported field of an imported type.
+ UnexportedLitField
+
+ // InvalidLitField occurs when a field name is not a valid identifier.
+ //
+ // Example:
+ // var _ = struct{i int}{1: 1}
+ InvalidLitField
+
+ // UntypedLit occurs when a composite literal omits a required type
+ // identifier.
+ //
+ // Example:
+ // type outer struct{
+ // inner struct { i int }
+ // }
+ //
+ // var _ = outer{inner: {1}}
+ UntypedLit
+
+ // InvalidLit occurs when a composite literal expression does not match its
+ // type.
+ //
+ // Example:
+ // type P *struct{
+ // x int
+ // }
+ // var _ = P {}
+ InvalidLit
+
+ /* exprs > selector */
+
+ // AmbiguousSelector occurs when a selector is ambiguous.
+ //
+ // Example:
+ // type E1 struct { i int }
+ // type E2 struct { i int }
+ // type T struct { E1; E2 }
+ //
+ // var x T
+ // var _ = x.i
+ AmbiguousSelector
+
+ // UndeclaredImportedName occurs when a package-qualified identifier is
+ // undeclared by the imported package.
+ //
+ // Example:
+ // import "go/types"
+ //
+ // var _ = types.NotAnActualIdentifier
+ UndeclaredImportedName
+
+ // UnexportedName occurs when a selector refers to an unexported identifier
+ // of an imported package.
+ //
+ // Example:
+ // import "reflect"
+ //
+ // type _ reflect.flag
+ UnexportedName
+
+ // UndeclaredName occurs when an identifier is not declared in the current
+ // scope.
+ //
+ // Example:
+ // var x T
+ UndeclaredName
+
+ // MissingFieldOrMethod occurs when a selector references a field or method
+ // that does not exist.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // var x = T{}.f
+ MissingFieldOrMethod
+
+ /* exprs > ... */
+
+ // BadDotDotDotSyntax occurs when a "..." occurs in a context where it is
+ // not valid.
+ //
+ // Example:
+ // var _ = map[int][...]int{0: {}}
+ BadDotDotDotSyntax
+
+ // NonVariadicDotDotDot occurs when a "..." is used on the final argument to
+ // a non-variadic function.
+ //
+ // Example:
+ // func printArgs(s []string) {
+ // for _, a := range s {
+ // println(a)
+ // }
+ // }
+ //
+ // func f() {
+ // s := []string{"a", "b", "c"}
+ // printArgs(s...)
+ // }
+ NonVariadicDotDotDot
+
+ // MisplacedDotDotDot occurs when a "..." is used somewhere other than the
+ // final argument to a function call.
+ //
+ // Example:
+ // func printArgs(args ...int) {
+ // for _, a := range args {
+ // println(a)
+ // }
+ // }
+ //
+ // func f() {
+ // a := []int{1,2,3}
+ // printArgs(0, a...)
+ // }
+ MisplacedDotDotDot
+
+ // InvalidDotDotDotOperand occurs when a "..." operator is applied to a
+ // single-valued operand.
+ //
+ // Example:
+ // func printArgs(args ...int) {
+ // for _, a := range args {
+ // println(a)
+ // }
+ // }
+ //
+ // func f() {
+ // a := 1
+ // printArgs(a...)
+ // }
+ //
+ // Example:
+ // func args() (int, int) {
+ // return 1, 2
+ // }
+ //
+ // func printArgs(args ...int) {
+ // for _, a := range args {
+ // println(a)
+ // }
+ // }
+ //
+ // func g() {
+ // printArgs(args()...)
+ // }
+ InvalidDotDotDotOperand
+
+ // InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in
+ // function.
+ //
+ // Example:
+ // var s = []int{1, 2, 3}
+ // var l = len(s...)
+ InvalidDotDotDot
+
+ /* exprs > built-in */
+
+ // UncalledBuiltin occurs when a built-in function is used as a
+ // function-valued expression, instead of being called.
+ //
+ // Per the spec:
+ // "The built-in functions do not have standard Go types, so they can only
+ // appear in call expressions; they cannot be used as function values."
+ //
+ // Example:
+ // var _ = copy
+ UncalledBuiltin
+
+ // InvalidAppend occurs when append is called with a first argument that is
+ // not a slice.
+ //
+ // Example:
+ // var _ = append(1, 2)
+ InvalidAppend
+
+ // InvalidCap occurs when an argument to the cap built-in function is not of
+ // supported type.
+ //
+ // See https://golang.org/ref/spec#Length_and_capacity for information on
+ // which underlying types are supported as arguments to cap and len.
+ //
+ // Example:
+ // var s = 2
+ // var x = cap(s)
+ InvalidCap
+
+ // InvalidClose occurs when close(...) is called with an argument that is
+ // not of channel type, or that is a receive-only channel.
+ //
+ // Example:
+ // func f() {
+ // var x int
+ // close(x)
+ // }
+ InvalidClose
+
+ // InvalidCopy occurs when the arguments are not of slice type or do not
+ // have compatible type.
+ //
+ // See https://golang.org/ref/spec#Appending_and_copying_slices for more
+ // information on the type requirements for the copy built-in.
+ //
+ // Example:
+ // func f() {
+ // var x []int
+ // y := []int64{1,2,3}
+ // copy(x, y)
+ // }
+ InvalidCopy
+
+ // InvalidComplex occurs when the complex built-in function is called with
+ // arguments with incompatible types.
+ //
+ // Example:
+ // var _ = complex(float32(1), float64(2))
+ InvalidComplex
+
+ // InvalidDelete occurs when the delete built-in function is called with a
+ // first argument that is not a map.
+ //
+ // Example:
+ // func f() {
+ // m := "hello"
+ // delete(m, "e")
+ // }
+ InvalidDelete
+
+ // InvalidImag occurs when the imag built-in function is called with an
+ // argument that does not have complex type.
+ //
+ // Example:
+ // var _ = imag(int(1))
+ InvalidImag
+
+ // InvalidLen occurs when an argument to the len built-in function is not of
+ // supported type.
+ //
+ // See https://golang.org/ref/spec#Length_and_capacity for information on
+ // which underlying types are supported as arguments to cap and len.
+ //
+ // Example:
+ // var s = 2
+ // var x = len(s)
+ InvalidLen
+
+ // SwappedMakeArgs occurs when make is called with three arguments, and its
+ // length argument is larger than its capacity argument.
+ //
+ // Example:
+ // var x = make([]int, 3, 2)
+ SwappedMakeArgs
+
+ // InvalidMake occurs when make is called with an unsupported type argument.
+ //
+ // See https://golang.org/ref/spec#Making_slices_maps_and_channels for
+ // information on the types that may be created using make.
+ //
+ // Example:
+ // var x = make(int)
+ InvalidMake
+
+ // InvalidReal occurs when the real built-in function is called with an
+ // argument that does not have complex type.
+ //
+ // Example:
+ // var _ = real(int(1))
+ InvalidReal
+
+ /* exprs > assertion */
+
+ // InvalidAssert occurs when a type assertion is applied to a
+ // value that is not of interface type.
+ //
+ // Example:
+ // var x = 1
+ // var _ = x.(float64)
+ InvalidAssert
+
+ // ImpossibleAssert occurs for a type assertion x.(T) when the value x of
+ // interface cannot have dynamic type T, due to a missing or mismatching
+ // method on T.
+ //
+ // Example:
+ // type T int
+ //
+ // func (t *T) m() int { return int(*t) }
+ //
+ // type I interface { m() int }
+ //
+ // var x I
+ // var _ = x.(T)
+ ImpossibleAssert
+
+ /* exprs > conversion */
+
+ // InvalidConversion occurs when the argument type cannot be converted to the
+ // target.
+ //
+ // See https://golang.org/ref/spec#Conversions for the rules of
+ // convertibility.
+ //
+ // Example:
+ // var x float64
+ // var _ = string(x)
+ InvalidConversion
+
+ // InvalidUntypedConversion occurs when there is no valid implicit
+ // conversion from an untyped value satisfying the type constraints of the
+ // context in which it is used.
+ //
+ // Example:
+ // var _ = 1 + ""
+ InvalidUntypedConversion
+
+ /* offsetof */
+
+ // BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument
+ // that is not a selector expression.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Offsetof(x)
+ BadOffsetofSyntax
+
+ // InvalidOffsetof occurs when unsafe.Offsetof is called with a method
+ // selector, rather than a field selector, or when the field is embedded via
+ // a pointer.
+ //
+ // Per the spec:
+ //
+ // "If f is an embedded field, it must be reachable without pointer
+ // indirections through fields of the struct. "
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type T struct { f int }
+ // type S struct { *T }
+ // var s S
+ // var _ = unsafe.Offsetof(s.f)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type S struct{}
+ //
+ // func (S) m() {}
+ //
+ // var s S
+ // var _ = unsafe.Offsetof(s.m)
+ InvalidOffsetof
+
+ /* control flow > scope */
+
+ // UnusedExpr occurs when a side-effect free expression is used as a
+ // statement. Such a statement has no effect.
+ //
+ // Example:
+ // func f(i int) {
+ // i*i
+ // }
+ UnusedExpr
+
+ // UnusedVar occurs when a variable is declared but unused.
+ //
+ // Example:
+ // func f() {
+ // x := 1
+ // }
+ UnusedVar
+
+ // MissingReturn occurs when a function with results is missing a return
+ // statement.
+ //
+ // Example:
+ // func f() int {}
+ MissingReturn
+
+ // WrongResultCount occurs when a return statement returns an incorrect
+ // number of values.
+ //
+ // Example:
+ // func ReturnOne() int {
+ // return 1, 2
+ // }
+ WrongResultCount
+
+ // OutOfScopeResult occurs when the name of a value implicitly returned by
+ // an empty return statement is shadowed in a nested scope.
+ //
+ // Example:
+ // func factor(n int) (i int) {
+ // for i := 2; i < n; i++ {
+ // if n%i == 0 {
+ // return
+ // }
+ // }
+ // return 0
+ // }
+ OutOfScopeResult
+
+ /* control flow > if */
+
+ // InvalidCond occurs when an if condition is not a boolean expression.
+ //
+ // Example:
+ // func checkReturn(i int) {
+ // if i {
+ // panic("non-zero return")
+ // }
+ // }
+ InvalidCond
+
+ /* control flow > for */
+
+ // InvalidPostDecl occurs when there is a declaration in a for-loop post
+ // statement.
+ //
+ // Example:
+ // func f() {
+ // for i := 0; i < 10; j := 0 {}
+ // }
+ InvalidPostDecl
+
+ // InvalidChanRange occurs when a send-only channel used in a range
+ // expression.
+ //
+ // Example:
+ // func sum(c chan<- int) {
+ // s := 0
+ // for i := range c {
+ // s += i
+ // }
+ // }
+ InvalidChanRange
+
+ // InvalidIterVar occurs when two iteration variables are used while ranging
+ // over a channel.
+ //
+ // Example:
+ // func f(c chan int) {
+ // for k, v := range c {
+ // println(k, v)
+ // }
+ // }
+ InvalidIterVar
+
+ // InvalidRangeExpr occurs when the type of a range expression is not array,
+ // slice, string, map, or channel.
+ //
+ // Example:
+ // func f(i int) {
+ // for j := range i {
+ // println(j)
+ // }
+ // }
+ InvalidRangeExpr
+
+ /* control flow > switch */
+
+ // MisplacedBreak occurs when a break statement is not within a for, switch,
+ // or select statement of the innermost function definition.
+ //
+ // Example:
+ // func f() {
+ // break
+ // }
+ MisplacedBreak
+
+ // MisplacedContinue occurs when a continue statement is not within a for
+ // loop of the innermost function definition.
+ //
+ // Example:
+ // func sumeven(n int) int {
+ // proceed := func() {
+ // continue
+ // }
+ // sum := 0
+ // for i := 1; i <= n; i++ {
+ // if i % 2 != 0 {
+ // proceed()
+ // }
+ // sum += i
+ // }
+ // return sum
+ // }
+ MisplacedContinue
+
+ // MisplacedFallthrough occurs when a fallthrough statement is not within an
+ // expression switch.
+ //
+ // Example:
+ // func typename(i interface{}) string {
+ // switch i.(type) {
+ // case int64:
+ // fallthrough
+ // case int:
+ // return "int"
+ // }
+ // return "unsupported"
+ // }
+ MisplacedFallthrough
+
+ // DuplicateCase occurs when a type or expression switch has duplicate
+ // cases.
+ //
+ // Example:
+ // func printInt(i int) {
+ // switch i {
+ // case 1:
+ // println("one")
+ // case 1:
+ // println("One")
+ // }
+ // }
+ DuplicateCase
+
+ // DuplicateDefault occurs when a type or expression switch has multiple
+ // default clauses.
+ //
+ // Example:
+ // func printInt(i int) {
+ // switch i {
+ // case 1:
+ // println("one")
+ // default:
+ // println("One")
+ // default:
+ // println("1")
+ // }
+ // }
+ DuplicateDefault
+
+ // BadTypeKeyword occurs when a .(type) expression is used anywhere other
+ // than a type switch.
+ //
+ // Example:
+ // type I interface {
+ // m()
+ // }
+ // var t I
+ // var _ = t.(type)
+ BadTypeKeyword
+
+ // InvalidTypeSwitch occurs when .(type) is used on an expression that is
+ // not of interface type.
+ //
+ // Example:
+ // func f(i int) {
+ // switch x := i.(type) {}
+ // }
+ InvalidTypeSwitch
+
+ // InvalidExprSwitch occurs when a switch expression is not comparable.
+ //
+ // Example:
+ // func _() {
+ // var a struct{ _ func() }
+ // switch a /* ERROR cannot switch on a */ {
+ // }
+ // }
+ InvalidExprSwitch
+
+ /* control flow > select */
+
+ // InvalidSelectCase occurs when a select case is not a channel send or
+ // receive.
+ //
+ // Example:
+ // func checkChan(c <-chan int) bool {
+ // select {
+ // case c:
+ // return true
+ // default:
+ // return false
+ // }
+ // }
+ InvalidSelectCase
+
+ /* control flow > labels and jumps */
+
+ // UndeclaredLabel occurs when an undeclared label is jumped to.
+ //
+ // Example:
+ // func f() {
+ // goto L
+ // }
+ UndeclaredLabel
+
+ // DuplicateLabel occurs when a label is declared more than once.
+ //
+ // Example:
+ // func f() int {
+ // L:
+ // L:
+ // return 1
+ // }
+ DuplicateLabel
+
+ // MisplacedLabel occurs when a break or continue label is not on a for,
+ // switch, or select statement.
+ //
+ // Example:
+ // func f() {
+ // L:
+ // a := []int{1,2,3}
+ // for _, e := range a {
+ // if e > 10 {
+ // break L
+ // }
+ // println(a)
+ // }
+ // }
+ MisplacedLabel
+
+ // UnusedLabel occurs when a label is declared but not used.
+ //
+ // Example:
+ // func f() {
+ // L:
+ // }
+ UnusedLabel
+
+ // JumpOverDecl occurs when a label jumps over a variable declaration.
+ //
+ // Example:
+ // func f() int {
+ // goto L
+ // x := 2
+ // L:
+ // x++
+ // return x
+ // }
+ JumpOverDecl
+
+ // JumpIntoBlock occurs when a forward jump goes to a label inside a nested
+ // block.
+ //
+ // Example:
+ // func f(x int) {
+ // goto L
+ // if x > 0 {
+ // L:
+ // print("inside block")
+ // }
+ // }
+ JumpIntoBlock
+
+ /* control flow > calls */
+
+ // InvalidMethodExpr occurs when a pointer method is called but the argument
+ // is not addressable.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func (*T) m() int { return 1 }
+ //
+ // var _ = T.m(T{})
+ InvalidMethodExpr
+
+ // WrongArgCount occurs when too few or too many arguments are passed by a
+ // function call.
+ //
+ // Example:
+ // func f(i int) {}
+ // var x = f()
+ WrongArgCount
+
+ // InvalidCall occurs when an expression is called that is not of function
+ // type.
+ //
+ // Example:
+ // var x = "x"
+ // var y = x()
+ InvalidCall
+
+ /* control flow > suspended */
+
+ // UnusedResults occurs when a restricted expression-only built-in function
+ // is suspended via go or defer. Such a suspension discards the results of
+ // these side-effect free built-in functions, and therefore is ineffectual.
+ //
+ // Example:
+ // func f(a []int) int {
+ // defer len(a)
+ // return i
+ // }
+ UnusedResults
+
+ // InvalidDefer occurs when a deferred expression is not a function call,
+ // for example if the expression is a type conversion.
+ //
+ // Example:
+ // func f(i int) int {
+ // defer int32(i)
+ // return i
+ // }
+ InvalidDefer
+
+ // InvalidGo occurs when a go expression is not a function call, for example
+ // if the expression is a type conversion.
+ //
+ // Example:
+ // func f(i int) int {
+ // go int32(i)
+ // return i
+ // }
+ InvalidGo
+
+ // All codes below were added in Go 1.17.
+
+ /* decl */
+
+ // BadDecl occurs when a declaration has invalid syntax.
+ BadDecl
+
+ // RepeatedDecl occurs when an identifier occurs more than once on the left
+ // hand side of a short variable declaration.
+ //
+ // Example:
+ // func _() {
+ // x, y, y := 1, 2, 3
+ // }
+ RepeatedDecl
+
+ /* unsafe */
+
+ // InvalidUnsafeAdd occurs when unsafe.Add is called with a
+ // length argument that is not of integer type.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var p unsafe.Pointer
+ // var _ = unsafe.Add(p, float64(1))
+ InvalidUnsafeAdd
+
+ // InvalidUnsafeSlice occurs when unsafe.Slice is called with a
+ // pointer argument that is not of pointer type or a length argument
+ // that is not of integer type, negative, or out of bounds.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(x, 1)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, float64(1))
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, -1)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, uint64(1) << 63)
+ InvalidUnsafeSlice
+
+ // All codes below were added in Go 1.18.
+
+ /* features */
+
+ // UnsupportedFeature occurs when a language feature is used that is not
+ // supported at this Go version.
+ UnsupportedFeature
+
+ /* type params */
+
+ // NotAGenericType occurs when a non-generic type is used where a generic
+ // type is expected: in type or function instantiation.
+ //
+ // Example:
+ // type T int
+ //
+ // var _ T[int]
+ NotAGenericType
+
+ // WrongTypeArgCount occurs when a type or function is instantiated with an
+ // incorrect number of type arguments, including when a generic type or
+ // function is used without instantiation.
+ //
+ // Errors involving failed type inference are assigned other error codes.
+ //
+ // Example:
+ // type T[p any] int
+ //
+ // var _ T[int, string]
+ //
+ // Example:
+ // func f[T any]() {}
+ //
+ // var x = f
+ WrongTypeArgCount
+
+ // CannotInferTypeArgs occurs when type or function type argument inference
+ // fails to infer all type arguments.
+ //
+ // Example:
+ // func f[T any]() {}
+ //
+ // func _() {
+ // f()
+ // }
+ //
+ // Example:
+ // type N[P, Q any] struct{}
+ //
+ // var _ N[int]
+ CannotInferTypeArgs
+
+ // InvalidTypeArg occurs when a type argument does not satisfy its
+ // corresponding type parameter constraints.
+ //
+ // Example:
+ // type T[P ~int] struct{}
+ //
+ // var _ T[string]
+ InvalidTypeArg // arguments? InferenceFailed
+
+ // InvalidInstanceCycle occurs when an invalid cycle is detected
+ // within the instantiation graph.
+ //
+ // Example:
+ // func f[T any]() { f[*T]() }
+ InvalidInstanceCycle
+
+ // InvalidUnion occurs when an embedded union or approximation element is
+ // not valid.
+ //
+ // Example:
+ // type _ interface {
+ // ~int | interface{ m() }
+ // }
+ InvalidUnion
+
+ // MisplacedConstraintIface occurs when a constraint-type interface is used
+ // outside of constraint position.
+ //
+ // Example:
+ // type I interface { ~int }
+ //
+ // var _ I
+ MisplacedConstraintIface
+
+ // InvalidMethodTypeParams occurs when methods have type parameters.
+ //
+ // It cannot be encountered with an AST parsed using go/parser.
+ InvalidMethodTypeParams
+
+ // MisplacedTypeParam occurs when a type parameter is used in a place where
+ // it is not permitted.
+ //
+ // Example:
+ // type T[P any] P
+ //
+ // Example:
+ // type T[P any] struct{ *P }
+ MisplacedTypeParam
+
+ // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with
+ // an argument that is not of slice type. It also occurs if it is used
+ // in a package compiled for a language version before go1.20.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.SliceData(x)
+ InvalidUnsafeSliceData
+
+ // InvalidUnsafeString occurs when unsafe.String is called with
+ // a length argument that is not of integer type, negative, or
+ // out of bounds. It also occurs if it is used in a package
+ // compiled for a language version before go1.20.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var b [10]byte
+ // var _ = unsafe.String(&b[0], -1)
+ InvalidUnsafeString
+
+ // InvalidUnsafeStringData occurs if it is used in a package
+ // compiled for a language version before go1.20.
+ _ // not used anymore
+
+)
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go
new file mode 100644
index 0000000000..15ecf7c5de
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go
@@ -0,0 +1,179 @@
+// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT.
+
+package typesinternal
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidSyntaxTree - -1]
+ _ = x[Test-1]
+ _ = x[BlankPkgName-2]
+ _ = x[MismatchedPkgName-3]
+ _ = x[InvalidPkgUse-4]
+ _ = x[BadImportPath-5]
+ _ = x[BrokenImport-6]
+ _ = x[ImportCRenamed-7]
+ _ = x[UnusedImport-8]
+ _ = x[InvalidInitCycle-9]
+ _ = x[DuplicateDecl-10]
+ _ = x[InvalidDeclCycle-11]
+ _ = x[InvalidTypeCycle-12]
+ _ = x[InvalidConstInit-13]
+ _ = x[InvalidConstVal-14]
+ _ = x[InvalidConstType-15]
+ _ = x[UntypedNilUse-16]
+ _ = x[WrongAssignCount-17]
+ _ = x[UnassignableOperand-18]
+ _ = x[NoNewVar-19]
+ _ = x[MultiValAssignOp-20]
+ _ = x[InvalidIfaceAssign-21]
+ _ = x[InvalidChanAssign-22]
+ _ = x[IncompatibleAssign-23]
+ _ = x[UnaddressableFieldAssign-24]
+ _ = x[NotAType-25]
+ _ = x[InvalidArrayLen-26]
+ _ = x[BlankIfaceMethod-27]
+ _ = x[IncomparableMapKey-28]
+ _ = x[InvalidIfaceEmbed-29]
+ _ = x[InvalidPtrEmbed-30]
+ _ = x[BadRecv-31]
+ _ = x[InvalidRecv-32]
+ _ = x[DuplicateFieldAndMethod-33]
+ _ = x[DuplicateMethod-34]
+ _ = x[InvalidBlank-35]
+ _ = x[InvalidIota-36]
+ _ = x[MissingInitBody-37]
+ _ = x[InvalidInitSig-38]
+ _ = x[InvalidInitDecl-39]
+ _ = x[InvalidMainDecl-40]
+ _ = x[TooManyValues-41]
+ _ = x[NotAnExpr-42]
+ _ = x[TruncatedFloat-43]
+ _ = x[NumericOverflow-44]
+ _ = x[UndefinedOp-45]
+ _ = x[MismatchedTypes-46]
+ _ = x[DivByZero-47]
+ _ = x[NonNumericIncDec-48]
+ _ = x[UnaddressableOperand-49]
+ _ = x[InvalidIndirection-50]
+ _ = x[NonIndexableOperand-51]
+ _ = x[InvalidIndex-52]
+ _ = x[SwappedSliceIndices-53]
+ _ = x[NonSliceableOperand-54]
+ _ = x[InvalidSliceExpr-55]
+ _ = x[InvalidShiftCount-56]
+ _ = x[InvalidShiftOperand-57]
+ _ = x[InvalidReceive-58]
+ _ = x[InvalidSend-59]
+ _ = x[DuplicateLitKey-60]
+ _ = x[MissingLitKey-61]
+ _ = x[InvalidLitIndex-62]
+ _ = x[OversizeArrayLit-63]
+ _ = x[MixedStructLit-64]
+ _ = x[InvalidStructLit-65]
+ _ = x[MissingLitField-66]
+ _ = x[DuplicateLitField-67]
+ _ = x[UnexportedLitField-68]
+ _ = x[InvalidLitField-69]
+ _ = x[UntypedLit-70]
+ _ = x[InvalidLit-71]
+ _ = x[AmbiguousSelector-72]
+ _ = x[UndeclaredImportedName-73]
+ _ = x[UnexportedName-74]
+ _ = x[UndeclaredName-75]
+ _ = x[MissingFieldOrMethod-76]
+ _ = x[BadDotDotDotSyntax-77]
+ _ = x[NonVariadicDotDotDot-78]
+ _ = x[MisplacedDotDotDot-79]
+ _ = x[InvalidDotDotDotOperand-80]
+ _ = x[InvalidDotDotDot-81]
+ _ = x[UncalledBuiltin-82]
+ _ = x[InvalidAppend-83]
+ _ = x[InvalidCap-84]
+ _ = x[InvalidClose-85]
+ _ = x[InvalidCopy-86]
+ _ = x[InvalidComplex-87]
+ _ = x[InvalidDelete-88]
+ _ = x[InvalidImag-89]
+ _ = x[InvalidLen-90]
+ _ = x[SwappedMakeArgs-91]
+ _ = x[InvalidMake-92]
+ _ = x[InvalidReal-93]
+ _ = x[InvalidAssert-94]
+ _ = x[ImpossibleAssert-95]
+ _ = x[InvalidConversion-96]
+ _ = x[InvalidUntypedConversion-97]
+ _ = x[BadOffsetofSyntax-98]
+ _ = x[InvalidOffsetof-99]
+ _ = x[UnusedExpr-100]
+ _ = x[UnusedVar-101]
+ _ = x[MissingReturn-102]
+ _ = x[WrongResultCount-103]
+ _ = x[OutOfScopeResult-104]
+ _ = x[InvalidCond-105]
+ _ = x[InvalidPostDecl-106]
+ _ = x[InvalidChanRange-107]
+ _ = x[InvalidIterVar-108]
+ _ = x[InvalidRangeExpr-109]
+ _ = x[MisplacedBreak-110]
+ _ = x[MisplacedContinue-111]
+ _ = x[MisplacedFallthrough-112]
+ _ = x[DuplicateCase-113]
+ _ = x[DuplicateDefault-114]
+ _ = x[BadTypeKeyword-115]
+ _ = x[InvalidTypeSwitch-116]
+ _ = x[InvalidExprSwitch-117]
+ _ = x[InvalidSelectCase-118]
+ _ = x[UndeclaredLabel-119]
+ _ = x[DuplicateLabel-120]
+ _ = x[MisplacedLabel-121]
+ _ = x[UnusedLabel-122]
+ _ = x[JumpOverDecl-123]
+ _ = x[JumpIntoBlock-124]
+ _ = x[InvalidMethodExpr-125]
+ _ = x[WrongArgCount-126]
+ _ = x[InvalidCall-127]
+ _ = x[UnusedResults-128]
+ _ = x[InvalidDefer-129]
+ _ = x[InvalidGo-130]
+ _ = x[BadDecl-131]
+ _ = x[RepeatedDecl-132]
+ _ = x[InvalidUnsafeAdd-133]
+ _ = x[InvalidUnsafeSlice-134]
+ _ = x[UnsupportedFeature-135]
+ _ = x[NotAGenericType-136]
+ _ = x[WrongTypeArgCount-137]
+ _ = x[CannotInferTypeArgs-138]
+ _ = x[InvalidTypeArg-139]
+ _ = x[InvalidInstanceCycle-140]
+ _ = x[InvalidUnion-141]
+ _ = x[MisplacedConstraintIface-142]
+ _ = x[InvalidMethodTypeParams-143]
+ _ = x[MisplacedTypeParam-144]
+ _ = x[InvalidUnsafeSliceData-145]
+ _ = x[InvalidUnsafeString-146]
+}
+
+const (
+ _ErrorCode_name_0 = "InvalidSyntaxTree"
+ _ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeature
NotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString"
+)
+
+var (
+ _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180}
+)
+
+func (i ErrorCode) String() string {
+ switch {
+ case i == -1:
+ return _ErrorCode_name_0
+ case 1 <= i && i <= 146:
+ i -= 1
+ return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]]
+ default:
+ return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/fx.go b/vendor/golang.org/x/tools/internal/typesinternal/fx.go
new file mode 100644
index 0000000000..93acff2170
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/fx.go
@@ -0,0 +1,49 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// NoEffects reports whether the expression has no side effects, i.e., it
+// does not modify the memory state. This function is conservative: it may
+// return false even when the expression has no effect.
+func NoEffects(info *types.Info, expr ast.Expr) bool {
+ noEffects := true
+ ast.Inspect(expr, func(n ast.Node) bool {
+ switch v := n.(type) {
+ case nil, *ast.Ident, *ast.BasicLit, *ast.BinaryExpr, *ast.ParenExpr,
+ *ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr,
+ *ast.StarExpr, *ast.CompositeLit, *ast.ArrayType, *ast.StructType,
+ *ast.MapType, *ast.InterfaceType, *ast.KeyValueExpr:
+ // No effect
+ case *ast.UnaryExpr:
+ // Channel send <-ch has effects
+ if v.Op == token.ARROW {
+ noEffects = false
+ }
+ case *ast.CallExpr:
+ // Type conversion has no effects
+ if !info.Types[v.Fun].IsType() {
+ // TODO(adonovan): Add a case for built-in functions without side
+ // effects (by using callsPureBuiltin from tools/internal/refactor/inline)
+
+ noEffects = false
+ }
+ case *ast.FuncLit:
+ // A FuncLit has no effects, but do not descend into it.
+ return false
+ default:
+ // All other expressions have effects
+ noEffects = false
+ }
+
+ return noEffects
+ })
+ return noEffects
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go
new file mode 100644
index 0000000000..f2affec4fb
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go
@@ -0,0 +1,71 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/types"
+ "slices"
+)
+
+// IsTypeNamed reports whether t is (or is an alias for) a
+// package-level defined type with the given package path and one of
+// the given names. It returns false if t is nil.
+//
+// This function avoids allocating the concatenation of "pkg.Name",
+// which is important for the performance of syntax matching.
+func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool {
+ if named, ok := types.Unalias(t).(*types.Named); ok {
+ tname := named.Obj()
+ return tname != nil &&
+ IsPackageLevel(tname) &&
+ tname.Pkg().Path() == pkgPath &&
+ slices.Contains(names, tname.Name())
+ }
+ return false
+}
+
+// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a
+// package-level defined type with the given package path and one of the given
+// names. It returns false if t is not a pointer type.
+func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool {
+ r := Unpointer(t)
+ if r == t {
+ return false
+ }
+ return IsTypeNamed(r, pkgPath, names...)
+}
+
+// IsFunctionNamed reports whether obj is a package-level function
+// defined in the given package and has one of the given names.
+// It returns false if obj is nil.
+//
+// This function avoids allocating the concatenation of "pkg.Name",
+// which is important for the performance of syntax matching.
+func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool {
+ f, ok := obj.(*types.Func)
+ return ok &&
+ IsPackageLevel(obj) &&
+ f.Pkg().Path() == pkgPath &&
+ f.Type().(*types.Signature).Recv() == nil &&
+ slices.Contains(names, f.Name())
+}
+
+// IsMethodNamed reports whether obj is a method defined on a
+// package-level type with the given package and type name, and has
+// one of the given names. It returns false if obj is nil.
+//
+// This function avoids allocating the concatenation of "pkg.TypeName.Name",
+// which is important for the performance of syntax matching.
+func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool {
+ if fn, ok := obj.(*types.Func); ok {
+ if recv := fn.Type().(*types.Signature).Recv(); recv != nil {
+ _, T := ReceiverNamed(recv)
+ return T != nil &&
+ IsTypeNamed(T, pkgPath, typeName) &&
+ slices.Contains(names, fn.Name())
+ }
+ }
+ return false
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
new file mode 100644
index 0000000000..64f47919f0
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
@@ -0,0 +1,54 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/ast"
+ "go/types"
+ "strconv"
+)
+
+// FileQualifier returns a [types.Qualifier] function that qualifies
+// imported symbols appropriately based on the import environment of a given
+// file.
+// If the same package is imported multiple times, the last appearance is
+// recorded.
+//
+// TODO(adonovan): this function ignores the effect of shadowing. It
+// should accept a [token.Pos] and a [types.Info] and compute only the
+// set of imports that are not shadowed at that point, analogous to
+// [analysisinternal.AddImport]. It could also compute (as a side
+// effect) the set of additional imports required to ensure that there
+// is an accessible import for each necessary package, making it
+// converge even more closely with AddImport.
+func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
+ // Construct mapping of import paths to their defined names.
+ // It is only necessary to look at renaming imports.
+ imports := make(map[string]string)
+ for _, imp := range f.Imports {
+ if imp.Name != nil && imp.Name.Name != "_" {
+ path, _ := strconv.Unquote(imp.Path.Value)
+ imports[path] = imp.Name.Name
+ }
+ }
+
+ // Define qualifier to replace full package paths with names of the imports.
+ return func(p *types.Package) string {
+ if p == nil || p == pkg {
+ return ""
+ }
+
+ if name, ok := imports[p.Path()]; ok {
+ if name == "." {
+ return ""
+ } else {
+ return name
+ }
+ }
+
+ // If there is no local renaming, fall back to the package name.
+ return p.Name()
+ }
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
new file mode 100644
index 0000000000..8352ea7617
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
@@ -0,0 +1,44 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/types"
+)
+
+// ReceiverNamed returns the named type (if any) associated with the
+// type of recv, which may be of the form N or *N, or aliases thereof.
+// It also reports whether a Pointer was present.
+//
+// The named result may be nil if recv is from a method on an
+// anonymous interface or struct types or in ill-typed code.
+func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
+ t := recv.Type()
+ if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
+ isPtr = true
+ t = ptr.Elem()
+ }
+ named, _ = types.Unalias(t).(*types.Named)
+ return
+}
+
+// Unpointer returns T given *T or an alias thereof.
+// For all other types it is the identity function.
+// It does not look at underlying types.
+// The result may be an alias.
+//
+// Use this function to strip off the optional pointer on a receiver
+// in a field or method selection, without losing the named type
+// (which is needed to compute the method set).
+//
+// See also [typeparams.MustDeref], which removes one level of
+// indirection from the type, regardless of named types (analogous to
+// a LOAD instruction).
+func Unpointer(t types.Type) types.Type {
+ if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
+ return ptr.Elem()
+ }
+ return t
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/toonew.go b/vendor/golang.org/x/tools/internal/typesinternal/toonew.go
new file mode 100644
index 0000000000..cc86487eaa
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/toonew.go
@@ -0,0 +1,89 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/internal/stdlib"
+ "golang.org/x/tools/internal/versions"
+)
+
+// TooNewStdSymbols computes the set of package-level symbols
+// exported by pkg that are not available at the specified version.
+// The result maps each symbol to its minimum version.
+//
+// The pkg is allowed to contain type errors.
+func TooNewStdSymbols(pkg *types.Package, version string) map[types.Object]string {
+ disallowed := make(map[types.Object]string)
+
+ // Pass 1: package-level symbols.
+ symbols := stdlib.PackageSymbols[pkg.Path()]
+ for _, sym := range symbols {
+ symver := sym.Version.String()
+ if versions.Before(version, symver) {
+ switch sym.Kind {
+ case stdlib.Func, stdlib.Var, stdlib.Const, stdlib.Type:
+ disallowed[pkg.Scope().Lookup(sym.Name)] = symver
+ }
+ }
+ }
+
+ // Pass 2: fields and methods.
+ //
+ // We allow fields and methods if their associated type is
+ // disallowed, as otherwise we would report false positives
+ // for compatibility shims. Consider:
+ //
+ // //go:build go1.22
+ // type T struct { F std.Real } // correct new API
+ //
+ // //go:build !go1.22
+ // type T struct { F fake } // shim
+ // type fake struct { ... }
+ // func (fake) M () {}
+ //
+ // These alternative declarations of T use either the std.Real
+ // type, introduced in go1.22, or a fake type, for the field
+ // F. (The fakery could be arbitrarily deep, involving more
+ // nested fields and methods than are shown here.) Clients
+ // that use the compatibility shim T will compile with any
+ // version of go, whether older or newer than go1.22, but only
+ // the newer version will use the std.Real implementation.
+ //
+ // Now consider a reference to method M in new(T).F.M() in a
+ // module that requires a minimum of go1.21. The analysis may
+ // occur using a version of Go higher than 1.21, selecting the
+ // first version of T, so the method M is Real.M. This would
+ // spuriously cause the analyzer to report a reference to a
+ // too-new symbol even though this expression compiles just
+ // fine (with the fake implementation) using go1.21.
+ for _, sym := range symbols {
+ symVersion := sym.Version.String()
+ if !versions.Before(version, symVersion) {
+ continue // allowed
+ }
+
+ var obj types.Object
+ switch sym.Kind {
+ case stdlib.Field:
+ typename, name := sym.SplitField()
+ if t := pkg.Scope().Lookup(typename); t != nil && disallowed[t] == "" {
+ obj, _, _ = types.LookupFieldOrMethod(t.Type(), false, pkg, name)
+ }
+
+ case stdlib.Method:
+ ptr, recvname, name := sym.SplitMethod()
+ if t := pkg.Scope().Lookup(recvname); t != nil && disallowed[t] == "" {
+ obj, _, _ = types.LookupFieldOrMethod(t.Type(), ptr, pkg, name)
+ }
+ }
+ if obj != nil {
+ disallowed[obj] = symVersion
+ }
+ }
+
+ return disallowed
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
new file mode 100644
index 0000000000..fef74a7856
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -0,0 +1,199 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typesinternal provides helpful operators for dealing with
+// go/types:
+//
+// - operators for querying typed syntax trees (e.g. [Imports], [IsFunctionNamed]);
+// - functions for converting types to strings or syntax (e.g. [TypeExpr], FileQualifier]);
+// - helpers for working with the [go/types] API (e.g. [NewTypesInfo]);
+// - access to internal go/types APIs that are not yet
+// exported (e.g. [SetUsesCgo], [ErrorCodeStartEnd], [VarKind]); and
+// - common algorithms related to types (e.g. [TooNewStdSymbols]).
+//
+// See also:
+// - [golang.org/x/tools/internal/astutil], for operations on untyped syntax;
+// - [golang.org/x/tools/internal/analysisinernal], for helpers for analyzers;
+// - [golang.org/x/tools/internal/refactor], for operators to compute text edits.
+package typesinternal
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+ "reflect"
+ "unsafe"
+
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/aliases"
+)
+
+func SetUsesCgo(conf *types.Config) bool {
+ v := reflect.ValueOf(conf).Elem()
+
+ f := v.FieldByName("go115UsesCgo")
+ if !f.IsValid() {
+ f = v.FieldByName("UsesCgo")
+ if !f.IsValid() {
+ return false
+ }
+ }
+
+ addr := unsafe.Pointer(f.UnsafeAddr())
+ *(*bool)(addr) = true
+
+ return true
+}
+
+// ErrorCodeStartEnd extracts additional information from types.Error values
+// generated by Go version 1.16 and later: the error code, start position, and
+// end position. If all positions are valid, start <= err.Pos <= end.
+//
+// If the data could not be read, the final result parameter will be false.
+//
+// TODO(adonovan): eliminate start/end when proposal #71803 is accepted.
+func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
+ var data [3]int
+ // By coincidence all of these fields are ints, which simplifies things.
+ v := reflect.ValueOf(err)
+ for i, name := range []string{"go116code", "go116start", "go116end"} {
+ f := v.FieldByName(name)
+ if !f.IsValid() {
+ return 0, 0, 0, false
+ }
+ data[i] = int(f.Int())
+ }
+ return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true
+}
+
+// NameRelativeTo returns a types.Qualifier that qualifies members of
+// all packages other than pkg, using only the package name.
+// (By contrast, [types.RelativeTo] uses the complete package path,
+// which is often excessive.)
+//
+// If pkg is nil, it is equivalent to [*types.Package.Name].
+//
+// TODO(adonovan): all uses of this with TypeString should be
+// eliminated when https://go.dev/issues/75604 is resolved.
+func NameRelativeTo(pkg *types.Package) types.Qualifier {
+ return func(other *types.Package) string {
+ if pkg != nil && pkg == other {
+ return "" // same package; unqualified
+ }
+ return other.Name()
+ }
+}
+
+// TypeNameFor returns the type name symbol for the specified type, if
+// it is a [*types.Alias], [*types.Named], [*types.TypeParam], or a
+// [*types.Basic] representing a type.
+//
+// For all other types, and for Basic types representing a builtin,
+// constant, or nil, it returns nil. Be careful not to convert the
+// resulting nil pointer to a [types.Object]!
+//
+// If t is the type of a constant, it may be an "untyped" type, which
+// has no TypeName. To access the name of such types (e.g. "untyped
+// int"), use [types.Basic.Name].
+func TypeNameFor(t types.Type) *types.TypeName {
+ switch t := t.(type) {
+ case *types.Alias:
+ return t.Obj()
+ case *types.Named:
+ return t.Obj()
+ case *types.TypeParam:
+ return t.Obj()
+ case *types.Basic:
+ // See issues #71886 and #66890 for some history.
+ if tname, ok := types.Universe.Lookup(t.Name()).(*types.TypeName); ok {
+ return tname
+ }
+ }
+ return nil
+}
+
+// A NamedOrAlias is a [types.Type] that is named (as
+// defined by the spec) and capable of bearing type parameters: it
+// abstracts aliases ([types.Alias]) and defined types
+// ([types.Named]).
+//
+// Every type declared by an explicit "type" declaration is a
+// NamedOrAlias. (Built-in type symbols may additionally
+// have type [types.Basic], which is not a NamedOrAlias,
+// though the spec regards them as "named"; see [TypeNameFor].)
+//
+// NamedOrAlias cannot expose the Origin method, because
+// [types.Alias.Origin] and [types.Named.Origin] have different
+// (covariant) result types; use [Origin] instead.
+type NamedOrAlias interface {
+ types.Type
+ Obj() *types.TypeName
+ TypeArgs() *types.TypeList
+ TypeParams() *types.TypeParamList
+ SetTypeParams(tparams []*types.TypeParam)
+}
+
+var (
+ _ NamedOrAlias = (*types.Alias)(nil)
+ _ NamedOrAlias = (*types.Named)(nil)
+)
+
+// Origin returns the generic type of the Named or Alias type t if it
+// is instantiated, otherwise it returns t.
+func Origin(t NamedOrAlias) NamedOrAlias {
+ switch t := t.(type) {
+ case *types.Alias:
+ return aliases.Origin(t)
+ case *types.Named:
+ return t.Origin()
+ }
+ return t
+}
+
+// IsPackageLevel reports whether obj is a package-level symbol.
+func IsPackageLevel(obj types.Object) bool {
+ return obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope()
+}
+
+// NewTypesInfo returns a *types.Info with all maps populated.
+func NewTypesInfo() *types.Info {
+ return &types.Info{
+ Types: map[ast.Expr]types.TypeAndValue{},
+ Instances: map[*ast.Ident]types.Instance{},
+ Defs: map[*ast.Ident]types.Object{},
+ Uses: map[*ast.Ident]types.Object{},
+ Implicits: map[ast.Node]types.Object{},
+ Selections: map[*ast.SelectorExpr]*types.Selection{},
+ Scopes: map[ast.Node]*types.Scope{},
+ FileVersions: map[*ast.File]string{},
+ }
+}
+
+// EnclosingScope returns the innermost block logically enclosing the cursor.
+func EnclosingScope(info *types.Info, cur inspector.Cursor) *types.Scope {
+ for cur := range cur.Enclosing() {
+ n := cur.Node()
+ // A function's Scope is associated with its FuncType.
+ switch f := n.(type) {
+ case *ast.FuncDecl:
+ n = f.Type
+ case *ast.FuncLit:
+ n = f.Type
+ }
+ if b := info.Scopes[n]; b != nil {
+ return b
+ }
+ }
+ panic("no Scope for *ast.File")
+}
+
+// Imports reports whether path is imported by pkg.
+func Imports(pkg *types.Package, path string) bool {
+ for _, imp := range pkg.Imports() {
+ if imp.Path() == path {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
new file mode 100644
index 0000000000..e5da049511
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
@@ -0,0 +1,40 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+// TODO(adonovan): when CL 645115 lands, define the go1.25 version of
+// this API that actually does something.
+
+import "go/types"
+
+type VarKind uint8
+
+const (
+ _ VarKind = iota // (not meaningful)
+ PackageVar // a package-level variable
+ LocalVar // a local variable
+ RecvVar // a method receiver variable
+ ParamVar // a function parameter variable
+ ResultVar // a function result variable
+ FieldVar // a struct field
+)
+
+func (kind VarKind) String() string {
+ return [...]string{
+ 0: "VarKind(0)",
+ PackageVar: "PackageVar",
+ LocalVar: "LocalVar",
+ RecvVar: "RecvVar",
+ ParamVar: "ParamVar",
+ ResultVar: "ResultVar",
+ FieldVar: "FieldVar",
+ }[kind]
+}
+
+// GetVarKind returns an invalid VarKind.
+func GetVarKind(v *types.Var) VarKind { return 0 }
+
+// SetVarKind has no effect.
+func SetVarKind(v *types.Var, kind VarKind) {}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
new file mode 100644
index 0000000000..453bba2ad5
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
@@ -0,0 +1,381 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "strings"
+)
+
+// ZeroString returns the string representation of the zero value for any type t.
+// The boolean result indicates whether the type is or contains an invalid type
+// or a non-basic (constraint) interface type.
+//
+// Even for invalid input types, ZeroString may return a partially correct
+// string representation. The caller should use the returned isValid boolean
+// to determine the validity of the expression.
+//
+// When assigning to a wider type (such as 'any'), it's the caller's
+// responsibility to handle any necessary type conversions.
+//
+// This string can be used on the right-hand side of an assignment where the
+// left-hand side has that explicit type.
+// References to named types are qualified by an appropriate (optional)
+// qualifier function.
+// Exception: This does not apply to tuples. Their string representation is
+// informational only and cannot be used in an assignment.
+//
+// See [ZeroExpr] for a variant that returns an [ast.Expr].
+func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch {
+ case t.Info()&types.IsBoolean != 0:
+ return "false", true
+ case t.Info()&types.IsNumeric != 0:
+ return "0", true
+ case t.Info()&types.IsString != 0:
+ return `""`, true
+ case t.Kind() == types.UnsafePointer:
+ fallthrough
+ case t.Kind() == types.UntypedNil:
+ return "nil", true
+ case t.Kind() == types.Invalid:
+ return "invalid", false
+ default:
+ panic(fmt.Sprintf("ZeroString for unexpected type %v", t))
+ }
+
+ case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+ return "nil", true
+
+ case *types.Interface:
+ if !t.IsMethodSet() {
+ return "invalid", false
+ }
+ return "nil", true
+
+ case *types.Named:
+ switch under := t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return types.TypeString(t, qual) + "{}", true
+ default:
+ return ZeroString(under, qual)
+ }
+
+ case *types.Alias:
+ switch t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return types.TypeString(t, qual) + "{}", true
+ default:
+ // A type parameter can have alias but alias type's underlying type
+ // can never be a type parameter.
+ // Use types.Unalias to preserve the info of type parameter instead
+ // of call Underlying() going right through and get the underlying
+ // type of the type parameter which is always an interface.
+ return ZeroString(types.Unalias(t), qual)
+ }
+
+ case *types.Array, *types.Struct:
+ return types.TypeString(t, qual) + "{}", true
+
+ case *types.TypeParam:
+ // Assumes func new is not shadowed.
+ return "*new(" + types.TypeString(t, qual) + ")", true
+
+ case *types.Tuple:
+ // Tuples are not normal values.
+ // We are currently format as "(t[0], ..., t[n])". Could be something else.
+ isValid := true
+ components := make([]string, t.Len())
+ for i := 0; i < t.Len(); i++ {
+ comp, ok := ZeroString(t.At(i).Type(), qual)
+
+ components[i] = comp
+ isValid = isValid && ok
+ }
+ return "(" + strings.Join(components, ", ") + ")", isValid
+
+ case *types.Union:
+ // Variables of these types cannot be created, so it makes
+ // no sense to ask for their zero value.
+ panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+ default:
+ panic(t) // unreachable.
+ }
+}
+
+// ZeroExpr returns the ast.Expr representation of the zero value for any type t.
+// The boolean result indicates whether the type is or contains an invalid type
+// or a non-basic (constraint) interface type.
+//
+// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr
+// representation. The caller should use the returned isValid boolean to determine
+// the validity of the expression.
+//
+// This function is designed for types suitable for variables and should not be
+// used with Tuple or Union types.References to named types are qualified by an
+// appropriate (optional) qualifier function.
+//
+// See [ZeroString] for a variant that returns a string.
+func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch {
+ case t.Info()&types.IsBoolean != 0:
+ return &ast.Ident{Name: "false"}, true
+ case t.Info()&types.IsNumeric != 0:
+ return &ast.BasicLit{Kind: token.INT, Value: "0"}, true
+ case t.Info()&types.IsString != 0:
+ return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true
+ case t.Kind() == types.UnsafePointer:
+ fallthrough
+ case t.Kind() == types.UntypedNil:
+ return ast.NewIdent("nil"), true
+ case t.Kind() == types.Invalid:
+ return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
+ default:
+ panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t))
+ }
+
+ case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+ return ast.NewIdent("nil"), true
+
+ case *types.Interface:
+ if !t.IsMethodSet() {
+ return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
+ }
+ return ast.NewIdent("nil"), true
+
+ case *types.Named:
+ switch under := t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return &ast.CompositeLit{
+ Type: TypeExpr(t, qual),
+ }, true
+ default:
+ return ZeroExpr(under, qual)
+ }
+
+ case *types.Alias:
+ switch t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return &ast.CompositeLit{
+ Type: TypeExpr(t, qual),
+ }, true
+ default:
+ return ZeroExpr(types.Unalias(t), qual)
+ }
+
+ case *types.Array, *types.Struct:
+ return &ast.CompositeLit{
+ Type: TypeExpr(t, qual),
+ }, true
+
+ case *types.TypeParam:
+ return &ast.StarExpr{ // *new(T)
+ X: &ast.CallExpr{
+ // Assumes func new is not shadowed.
+ Fun: ast.NewIdent("new"),
+ Args: []ast.Expr{
+ ast.NewIdent(t.Obj().Name()),
+ },
+ },
+ }, true
+
+ case *types.Tuple:
+ // Unlike ZeroString, there is no ast.Expr can express tuple by
+ // "(t[0], ..., t[n])".
+ panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+ case *types.Union:
+ // Variables of these types cannot be created, so it makes
+ // no sense to ask for their zero value.
+ panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+ default:
+ panic(t) // unreachable.
+ }
+}
+
+// TypeExpr returns syntax for the specified type. References to named types
+// are qualified by an appropriate (optional) qualifier function.
+// It may panic for types such as Tuple or Union.
+//
+// See also https://go.dev/issues/75604, which will provide a robust
+// Type-to-valid-Go-syntax formatter.
+func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch t.Kind() {
+ case types.UnsafePointer:
+ return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")}
+ default:
+ return ast.NewIdent(t.Name())
+ }
+
+ case *types.Pointer:
+ return &ast.UnaryExpr{
+ Op: token.MUL,
+ X: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Array:
+ return &ast.ArrayType{
+ Len: &ast.BasicLit{
+ Kind: token.INT,
+ Value: fmt.Sprintf("%d", t.Len()),
+ },
+ Elt: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Slice:
+ return &ast.ArrayType{
+ Elt: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Map:
+ return &ast.MapType{
+ Key: TypeExpr(t.Key(), qual),
+ Value: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Chan:
+ dir := ast.ChanDir(t.Dir())
+ if t.Dir() == types.SendRecv {
+ dir = ast.SEND | ast.RECV
+ }
+ return &ast.ChanType{
+ Dir: dir,
+ Value: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Signature:
+ var params []*ast.Field
+ for i := 0; i < t.Params().Len(); i++ {
+ params = append(params, &ast.Field{
+ Type: TypeExpr(t.Params().At(i).Type(), qual),
+ Names: []*ast.Ident{
+ {
+ Name: t.Params().At(i).Name(),
+ },
+ },
+ })
+ }
+ if t.Variadic() {
+ last := params[len(params)-1]
+ last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt}
+ }
+ var returns []*ast.Field
+ for i := 0; i < t.Results().Len(); i++ {
+ returns = append(returns, &ast.Field{
+ Type: TypeExpr(t.Results().At(i).Type(), qual),
+ })
+ }
+ return &ast.FuncType{
+ Params: &ast.FieldList{
+ List: params,
+ },
+ Results: &ast.FieldList{
+ List: returns,
+ },
+ }
+
+ case *types.TypeParam:
+ pkgName := qual(t.Obj().Pkg())
+ if pkgName == "" || t.Obj().Pkg() == nil {
+ return ast.NewIdent(t.Obj().Name())
+ }
+ return &ast.SelectorExpr{
+ X: ast.NewIdent(pkgName),
+ Sel: ast.NewIdent(t.Obj().Name()),
+ }
+
+ // types.TypeParam also implements interface NamedOrAlias. To differentiate,
+ // case TypeParam need to be present before case NamedOrAlias.
+ // TODO(hxjiang): remove this comment once TypeArgs() is added to interface
+ // NamedOrAlias.
+ case NamedOrAlias:
+ var expr ast.Expr = ast.NewIdent(t.Obj().Name())
+ if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" {
+ expr = &ast.SelectorExpr{
+ X: ast.NewIdent(pkgName),
+ Sel: expr.(*ast.Ident),
+ }
+ }
+
+ // TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to
+ // typesinternal.NamedOrAlias.
+ if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok {
+ if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 {
+ var indices []ast.Expr
+ for i := range typeArgs.Len() {
+ indices = append(indices, TypeExpr(typeArgs.At(i), qual))
+ }
+ expr = &ast.IndexListExpr{
+ X: expr,
+ Indices: indices,
+ }
+ }
+ }
+
+ return expr
+
+ case *types.Struct:
+ return ast.NewIdent(t.String())
+
+ case *types.Interface:
+ return ast.NewIdent(t.String())
+
+ case *types.Union:
+ if t.Len() == 0 {
+ panic("Union type should have at least one term")
+ }
+ // Same as go/ast, the return expression will put last term in the
+ // Y field at topmost level of BinaryExpr.
+ // For union of type "float32 | float64 | int64", the structure looks
+ // similar to:
+ // {
+ // X: {
+ // X: float32,
+ // Op: |
+ // Y: float64,
+ // }
+ // Op: |,
+ // Y: int64,
+ // }
+ var union ast.Expr
+ for i := range t.Len() {
+ term := t.Term(i)
+ termExpr := TypeExpr(term.Type(), qual)
+ if term.Tilde() {
+ termExpr = &ast.UnaryExpr{
+ Op: token.TILDE,
+ X: termExpr,
+ }
+ }
+ if i == 0 {
+ union = termExpr
+ } else {
+ union = &ast.BinaryExpr{
+ X: union,
+ Op: token.OR,
+ Y: termExpr,
+ }
+ }
+ }
+ return union
+
+ case *types.Tuple:
+ panic("invalid input type types.Tuple")
+
+ default:
+ panic("unreachable")
+ }
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go
new file mode 100644
index 0000000000..b53f178616
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/features.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+// This file contains predicates for working with file versions to
+// decide when a tool should consider a language feature enabled.
+
+// GoVersions that features in x/tools can be gated to.
+const (
+ Go1_18 = "go1.18"
+ Go1_19 = "go1.19"
+ Go1_20 = "go1.20"
+ Go1_21 = "go1.21"
+ Go1_22 = "go1.22"
+)
+
+// Future is an invalid unknown Go version sometime in the future.
+// Do not use directly with Compare.
+const Future = ""
+
+// AtLeast reports whether the file version v comes after a Go release.
+//
+// Use this predicate to enable a behavior once a certain Go release
+// has happened (and stays enabled in the future).
+func AtLeast(v, release string) bool {
+ if v == Future {
+ return true // an unknown future version is always after y.
+ }
+ return Compare(Lang(v), Lang(release)) >= 0
+}
+
+// Before reports whether the file version v is strictly before a Go release.
+//
+// Use this predicate to disable a behavior once a certain Go release
+// has happened (and stays enabled in the future).
+func Before(v, release string) bool {
+ if v == Future {
+ return false // an unknown future version happens after y.
+ }
+ return Compare(Lang(v), Lang(release)) < 0
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/gover.go b/vendor/golang.org/x/tools/internal/versions/gover.go
new file mode 100644
index 0000000000..bbabcd22e9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/gover.go
@@ -0,0 +1,172 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a fork of internal/gover for use by x/tools until
+// go1.21 and earlier are no longer supported by x/tools.
+
+package versions
+
+import "strings"
+
+// A gover is a parsed Go gover: major[.Minor[.Patch]][kind[pre]]
+// The numbers are the original decimal strings to avoid integer overflows
+// and since there is very little actual math. (Probably overflow doesn't matter in practice,
+// but at the time this code was written, there was an existing test that used
+// go1.99999999999, which does not fit in an int on 32-bit platforms.
+// The "big decimal" representation avoids the problem entirely.)
+type gover struct {
+ major string // decimal
+ minor string // decimal or ""
+ patch string // decimal or ""
+ kind string // "", "alpha", "beta", "rc"
+ pre string // decimal or ""
+}
+
+// compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as toolchain versions.
+// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21".
+// Malformed versions compare less than well-formed versions and equal to each other.
+// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0".
+func compare(x, y string) int {
+ vx := parse(x)
+ vy := parse(y)
+
+ if c := cmpInt(vx.major, vy.major); c != 0 {
+ return c
+ }
+ if c := cmpInt(vx.minor, vy.minor); c != 0 {
+ return c
+ }
+ if c := cmpInt(vx.patch, vy.patch); c != 0 {
+ return c
+ }
+ if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc
+ return c
+ }
+ if c := cmpInt(vx.pre, vy.pre); c != 0 {
+ return c
+ }
+ return 0
+}
+
+// lang returns the Go language version. For example, lang("1.2.3") == "1.2".
+func lang(x string) string {
+ v := parse(x)
+ if v.minor == "" || v.major == "1" && v.minor == "0" {
+ return v.major
+ }
+ return v.major + "." + v.minor
+}
+
+// isValid reports whether the version x is valid.
+func isValid(x string) bool {
+ return parse(x) != gover{}
+}
+
+// parse parses the Go version string x into a version.
+// It returns the zero version if x is malformed.
+func parse(x string) gover {
+ var v gover
+
+ // Parse major version.
+ var ok bool
+ v.major, x, ok = cutInt(x)
+ if !ok {
+ return gover{}
+ }
+ if x == "" {
+ // Interpret "1" as "1.0.0".
+ v.minor = "0"
+ v.patch = "0"
+ return v
+ }
+
+ // Parse . before minor version.
+ if x[0] != '.' {
+ return gover{}
+ }
+
+ // Parse minor version.
+ v.minor, x, ok = cutInt(x[1:])
+ if !ok {
+ return gover{}
+ }
+ if x == "" {
+ // Patch missing is same as "0" for older versions.
+ // Starting in Go 1.21, patch missing is different from explicit .0.
+ if cmpInt(v.minor, "21") < 0 {
+ v.patch = "0"
+ }
+ return v
+ }
+
+ // Parse patch if present.
+ if x[0] == '.' {
+ v.patch, x, ok = cutInt(x[1:])
+ if !ok || x != "" {
+ // Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != "").
+ // Allowing them would be a bit confusing because we already have:
+ // 1.21 < 1.21rc1
+ // But a prerelease of a patch would have the opposite effect:
+ // 1.21.3rc1 < 1.21.3
+ // We've never needed them before, so let's not start now.
+ return gover{}
+ }
+ return v
+ }
+
+ // Parse prerelease.
+ i := 0
+ for i < len(x) && (x[i] < '0' || '9' < x[i]) {
+ if x[i] < 'a' || 'z' < x[i] {
+ return gover{}
+ }
+ i++
+ }
+ if i == 0 {
+ return gover{}
+ }
+ v.kind, x = x[:i], x[i:]
+ if x == "" {
+ return v
+ }
+ v.pre, x, ok = cutInt(x)
+ if !ok || x != "" {
+ return gover{}
+ }
+
+ return v
+}
+
+// cutInt scans the leading decimal number at the start of x to an integer
+// and returns that value and the rest of the string.
+func cutInt(x string) (n, rest string, ok bool) {
+ i := 0
+ for i < len(x) && '0' <= x[i] && x[i] <= '9' {
+ i++
+ }
+ if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero
+ return "", "", false
+ }
+ return x[:i], x[i:], true
+}
+
+// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers.
+// (Copied from golang.org/x/mod/semver's compareInt.)
+func cmpInt(x, y string) int {
+ if x == y {
+ return 0
+ }
+ if len(x) < len(y) {
+ return -1
+ }
+ if len(x) > len(y) {
+ return +1
+ }
+ if x < y {
+ return -1
+ } else {
+ return +1
+ }
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go
new file mode 100644
index 0000000000..0fc10ce4eb
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/types.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+import (
+ "go/ast"
+ "go/types"
+)
+
+// FileVersion returns a file's Go version.
+// The reported version is an unknown Future version if a
+// version cannot be determined.
+func FileVersion(info *types.Info, file *ast.File) string {
+ // In tools built with Go >= 1.22, the Go version of a file
+ // follow a cascades of sources:
+ // 1) types.Info.FileVersion, which follows the cascade:
+ // 1.a) file version (ast.File.GoVersion),
+ // 1.b) the package version (types.Config.GoVersion), or
+ // 2) is some unknown Future version.
+ //
+ // File versions require a valid package version to be provided to types
+ // in Config.GoVersion. Config.GoVersion is either from the package's module
+ // or the toolchain (go run). This value should be provided by go/packages
+ // or unitchecker.Config.GoVersion.
+ if v := info.FileVersions[file]; IsValid(v) {
+ return v
+ }
+ // Note: we could instead return runtime.Version() [if valid].
+ // This would act as a max version on what a tool can support.
+ return Future
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/versions.go b/vendor/golang.org/x/tools/internal/versions/versions.go
new file mode 100644
index 0000000000..8d1f7453db
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/versions.go
@@ -0,0 +1,57 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+import (
+ "strings"
+)
+
+// Note: If we use build tags to use go/versions when go >=1.22,
+// we run into go.dev/issue/53737. Under some operations users would see an
+// import of "go/versions" even if they would not compile the file.
+// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include
+// For this reason, this library just a clone of go/versions for the moment.
+
+// Lang returns the Go language version for version x.
+// If x is not a valid version, Lang returns the empty string.
+// For example:
+//
+// Lang("go1.21rc2") = "go1.21"
+// Lang("go1.21.2") = "go1.21"
+// Lang("go1.21") = "go1.21"
+// Lang("go1") = "go1"
+// Lang("bad") = ""
+// Lang("1.21") = ""
+func Lang(x string) string {
+ v := lang(stripGo(x))
+ if v == "" {
+ return ""
+ }
+ return x[:2+len(v)] // "go"+v without allocation
+}
+
+// Compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as Go versions.
+// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21".
+// Invalid versions, including the empty string, compare less than
+// valid versions and equal to each other.
+// The language version "go1.21" compares less than the
+// release candidate and eventual releases "go1.21rc1" and "go1.21.0".
+// Custom toolchain suffixes are ignored during comparison:
+// "go1.21.0" and "go1.21.0-bigcorp" are equal.
+func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) }
+
+// IsValid reports whether the version x is valid.
+func IsValid(x string) bool { return isValid(stripGo(x)) }
+
+// stripGo converts from a "go1.21" version to a "1.21" version.
+// If v does not start with "go", stripGo returns the empty string (a known invalid version).
+func stripGo(v string) string {
+ v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix.
+ if len(v) < 2 || v[:2] != "go" {
+ return ""
+ }
+ return v[2:]
+}
diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
index e942bc983e..743bfb81d6 100644
--- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
+++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -371,7 +371,31 @@ func ConsumeVarint(b []byte) (v uint64, n int) {
func SizeVarint(v uint64) int {
// This computes 1 + (bits.Len64(v)-1)/7.
// 9/64 is a good enough approximation of 1/7
- return int(9*uint32(bits.Len64(v))+64) / 64
+ //
+ // The Go compiler can translate the bits.LeadingZeros64 call into the LZCNT
+ // instruction, which is very fast on CPUs from the last few years. The
+ // specific way of expressing the calculation matches C++ Protobuf, see
+ // https://godbolt.org/z/4P3h53oM4 for the C++ code and how gcc/clang
+ // optimize that function for GOAMD64=v1 and GOAMD64=v3 (-march=haswell).
+
+ // By OR'ing v with 1, we guarantee that v is never 0, without changing the
+ // result of SizeVarint. LZCNT is not defined for 0, meaning the compiler
+ // needs to add extra instructions to handle that case.
+ //
+ // The Go compiler currently (go1.24.4) does not make use of this knowledge.
+ // This opportunity (removing the XOR instruction, which handles the 0 case)
+ // results in a small (1%) performance win across CPU architectures.
+ //
+ // Independently of avoiding the 0 case, we need the v |= 1 line because
+ // it allows the Go compiler to eliminate an extra XCHGL barrier.
+ v |= 1
+
+ // It would be clearer to write log2value := 63 - uint32(...), but
+ // writing uint32(...) ^ 63 is much more efficient (-14% ARM, -20% Intel).
+ // Proof of identity for our value range [0..63]:
+ // https://go.dev/play/p/Pdn9hEWYakX
+ log2value := uint32(bits.LeadingZeros64(v)) ^ 63
+ return int((log2value*9 + (64 + 9)) / 64)
}
// AppendFixed32 appends v to b as a little-endian uint32.
diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
index 323829da14..04696351ee 100644
Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index b08b71830c..a0aad2777f 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -72,6 +72,9 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
case genid.FeatureSet_EnforceNamingStyle_field_number:
// EnforceNamingStyle is enforced in protoc, languages other than C++
// are not supposed to do anything with this feature.
+ case genid.FeatureSet_DefaultSymbolVisibility_field_number:
+ // DefaultSymbolVisibility is enforced in protoc, runtimes should not
+ // inspect this value.
default:
panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num))
}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/presence.go b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
new file mode 100644
index 0000000000..a12ec9791c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
@@ -0,0 +1,33 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import "google.golang.org/protobuf/reflect/protoreflect"
+
+// UsePresenceForField reports whether the presence bitmap should be used for
+// the specified field.
+func UsePresenceForField(fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
+ switch {
+ case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+ // Oneof fields never use the presence bitmap.
+ //
+ // Synthetic oneofs are an exception: Those are used to implement proto3
+ // optional fields and hence should follow non-oneof field semantics.
+ return false, false
+
+ case fd.IsMap():
+ // Map-typed fields never use the presence bitmap.
+ return false, false
+
+ case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
+ // Lazy fields always use the presence bitmap (only messages can be lazy).
+ isLazy := fd.(interface{ IsLazy() bool }).IsLazy()
+ return isLazy, isLazy
+
+ default:
+ // If the field has presence, use the presence bitmap.
+ return fd.HasPresence(), false
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
index df8f918501..3ceb6fa7f5 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
@@ -27,6 +27,7 @@ const (
Api_SourceContext_field_name protoreflect.Name = "source_context"
Api_Mixins_field_name protoreflect.Name = "mixins"
Api_Syntax_field_name protoreflect.Name = "syntax"
+ Api_Edition_field_name protoreflect.Name = "edition"
Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name"
Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods"
@@ -35,6 +36,7 @@ const (
Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context"
Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins"
Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax"
+ Api_Edition_field_fullname protoreflect.FullName = "google.protobuf.Api.edition"
)
// Field numbers for google.protobuf.Api.
@@ -46,6 +48,7 @@ const (
Api_SourceContext_field_number protoreflect.FieldNumber = 5
Api_Mixins_field_number protoreflect.FieldNumber = 6
Api_Syntax_field_number protoreflect.FieldNumber = 7
+ Api_Edition_field_number protoreflect.FieldNumber = 8
)
// Names for google.protobuf.Method.
@@ -63,6 +66,7 @@ const (
Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming"
Method_Options_field_name protoreflect.Name = "options"
Method_Syntax_field_name protoreflect.Name = "syntax"
+ Method_Edition_field_name protoreflect.Name = "edition"
Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name"
Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url"
@@ -71,6 +75,7 @@ const (
Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming"
Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options"
Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax"
+ Method_Edition_field_fullname protoreflect.FullName = "google.protobuf.Method.edition"
)
// Field numbers for google.protobuf.Method.
@@ -82,6 +87,7 @@ const (
Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5
Method_Options_field_number protoreflect.FieldNumber = 6
Method_Syntax_field_number protoreflect.FieldNumber = 7
+ Method_Edition_field_number protoreflect.FieldNumber = 8
)
// Names for google.protobuf.Mixin.
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index 39524782ad..950a6a325a 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -34,6 +34,19 @@ const (
Edition_EDITION_MAX_enum_value = 2147483647
)
+// Full and short names for google.protobuf.SymbolVisibility.
+const (
+ SymbolVisibility_enum_fullname = "google.protobuf.SymbolVisibility"
+ SymbolVisibility_enum_name = "SymbolVisibility"
+)
+
+// Enum values for google.protobuf.SymbolVisibility.
+const (
+ SymbolVisibility_VISIBILITY_UNSET_enum_value = 0
+ SymbolVisibility_VISIBILITY_LOCAL_enum_value = 1
+ SymbolVisibility_VISIBILITY_EXPORT_enum_value = 2
+)
+
// Names for google.protobuf.FileDescriptorSet.
const (
FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet"
@@ -65,6 +78,7 @@ const (
FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency"
FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency"
FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency"
+ FileDescriptorProto_OptionDependency_field_name protoreflect.Name = "option_dependency"
FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type"
FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type"
FileDescriptorProto_Service_field_name protoreflect.Name = "service"
@@ -79,6 +93,7 @@ const (
FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency"
FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency"
FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency"
+ FileDescriptorProto_OptionDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.option_dependency"
FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type"
FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type"
FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service"
@@ -96,6 +111,7 @@ const (
FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3
FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10
FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11
+ FileDescriptorProto_OptionDependency_field_number protoreflect.FieldNumber = 15
FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4
FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5
FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6
@@ -124,6 +140,7 @@ const (
DescriptorProto_Options_field_name protoreflect.Name = "options"
DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name"
+ DescriptorProto_Visibility_field_name protoreflect.Name = "visibility"
DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name"
DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field"
@@ -135,6 +152,7 @@ const (
DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options"
DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range"
DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name"
+ DescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.visibility"
)
// Field numbers for google.protobuf.DescriptorProto.
@@ -149,6 +167,7 @@ const (
DescriptorProto_Options_field_number protoreflect.FieldNumber = 7
DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9
DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10
+ DescriptorProto_Visibility_field_number protoreflect.FieldNumber = 11
)
// Names for google.protobuf.DescriptorProto.ExtensionRange.
@@ -388,12 +407,14 @@ const (
EnumDescriptorProto_Options_field_name protoreflect.Name = "options"
EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name"
+ EnumDescriptorProto_Visibility_field_name protoreflect.Name = "visibility"
EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name"
EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value"
EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options"
EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range"
EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name"
+ EnumDescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.visibility"
)
// Field numbers for google.protobuf.EnumDescriptorProto.
@@ -403,6 +424,7 @@ const (
EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3
EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4
EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5
+ EnumDescriptorProto_Visibility_field_number protoreflect.FieldNumber = 6
)
// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange.
@@ -1008,32 +1030,35 @@ const (
// Field names for google.protobuf.FeatureSet.
const (
- FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence"
- FeatureSet_EnumType_field_name protoreflect.Name = "enum_type"
- FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
- FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation"
- FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding"
- FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format"
- FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style"
-
- FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
- FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
- FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
- FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
- FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
- FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
- FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style"
+ FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence"
+ FeatureSet_EnumType_field_name protoreflect.Name = "enum_type"
+ FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
+ FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation"
+ FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding"
+ FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format"
+ FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style"
+ FeatureSet_DefaultSymbolVisibility_field_name protoreflect.Name = "default_symbol_visibility"
+
+ FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
+ FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
+ FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
+ FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
+ FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
+ FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+ FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style"
+ FeatureSet_DefaultSymbolVisibility_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.default_symbol_visibility"
)
// Field numbers for google.protobuf.FeatureSet.
const (
- FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1
- FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2
- FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
- FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4
- FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5
- FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6
- FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7
+ FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1
+ FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2
+ FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
+ FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4
+ FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5
+ FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6
+ FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7
+ FeatureSet_DefaultSymbolVisibility_field_number protoreflect.FieldNumber = 8
)
// Full and short names for google.protobuf.FeatureSet.FieldPresence.
@@ -1128,6 +1153,27 @@ const (
FeatureSet_STYLE_LEGACY_enum_value = 2
)
+// Names for google.protobuf.FeatureSet.VisibilityFeature.
+const (
+ FeatureSet_VisibilityFeature_message_name protoreflect.Name = "VisibilityFeature"
+ FeatureSet_VisibilityFeature_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet.VisibilityFeature"
+)
+
+// Full and short names for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+ FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_fullname = "google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility"
+ FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_name = "DefaultSymbolVisibility"
+)
+
+// Enum values for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+ FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN_enum_value = 0
+ FeatureSet_VisibilityFeature_EXPORT_ALL_enum_value = 1
+ FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL_enum_value = 2
+ FeatureSet_VisibilityFeature_LOCAL_ALL_enum_value = 3
+ FeatureSet_VisibilityFeature_STRICT_enum_value = 4
+)
+
// Names for google.protobuf.FeatureSetDefaults.
const (
FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults"
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
index 41c1f74ef8..bdad12a9bb 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
@@ -11,6 +11,7 @@ import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/internal/order"
"google.golang.org/protobuf/reflect/protoreflect"
piface "google.golang.org/protobuf/runtime/protoiface"
@@ -80,7 +81,7 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf
// permit us to skip over definitely-unset fields at marshal time.
var hasPresence bool
- hasPresence, cf.isLazy = usePresenceForField(si, fd)
+ hasPresence, cf.isLazy = filedesc.UsePresenceForField(fd)
if hasPresence {
cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
index dd55e8e009..5a439daacb 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
@@ -11,6 +11,7 @@ import (
"strings"
"sync/atomic"
+ "google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/reflect/protoreflect"
)
@@ -53,7 +54,7 @@ func opaqueInitHook(mi *MessageInfo) bool {
fd := fds.Get(i)
fs := si.fieldsByNumber[fd.Number()]
var fi fieldInfo
- usePresence, _ := usePresenceForField(si, fd)
+ usePresence, _ := filedesc.UsePresenceForField(fd)
switch {
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
@@ -343,17 +344,15 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
if p.IsNil() {
return false
}
- sp := p.Apply(fieldOffset).AtomicGetPointer()
- if sp.IsNil() {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
return false
}
- rv := sp.AsValueOf(fs.Type.Elem())
return rv.Elem().Len() > 0
},
clear: func(p pointer) {
- sp := p.Apply(fieldOffset).AtomicGetPointer()
- if !sp.IsNil() {
- rv := sp.AsValueOf(fs.Type.Elem())
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if !rv.IsNil() {
rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
}
},
@@ -361,11 +360,10 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
if p.IsNil() {
return conv.Zero()
}
- sp := p.Apply(fieldOffset).AtomicGetPointer()
- if sp.IsNil() {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
return conv.Zero()
}
- rv := sp.AsValueOf(fs.Type.Elem())
if rv.Elem().Len() == 0 {
return conv.Zero()
}
@@ -598,30 +596,3 @@ func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
func (mi *MessageInfo) present(p pointer, index uint32) bool {
return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
}
-
-// usePresenceForField implements the somewhat intricate logic of when
-// the presence bitmap is used for a field. The main logic is that a
-// field that is optional or that can be lazy will use the presence
-// bit, but for proto2, also maps have a presence bit. It also records
-// if the field can ever be lazy, which is true if we have a
-// lazyOffset and the field is a message or a slice of messages. A
-// field that is lazy will always need a presence bit. Oneofs are not
-// lazy and do not use presence, unless they are a synthetic oneof,
-// which is a proto3 optional field. For proto3 optionals, we use the
-// presence and they can also be lazy when applicable (a message).
-func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
- hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
-
- // Non-oneof scalar fields with explicit field presence use the presence array.
- usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
- switch {
- case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
- return false, false
- case fd.IsMap():
- return false, false
- case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
- return hasLazyField, hasLazyField
- default:
- return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
- }
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go
index 914cb1deda..443afe81cd 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/presence.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go
@@ -32,9 +32,6 @@ func (p presence) toElem(num uint32) (ret *uint32) {
// Present checks for the presence of a specific field number in a presence set.
func (p presence) Present(num uint32) bool {
- if p.P == nil {
- return false
- }
return Export{}.Present(p.toElem(num), num)
}
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index aac1cb18a7..697d1c14f3 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
const (
Major = 1
Minor = 36
- Patch = 6
+ Patch = 8
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index a4a0a2971d..730331e666 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -21,6 +21,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte {
b = p.appendRepeatedField(b, "public_dependency", nil)
case 11:
b = p.appendRepeatedField(b, "weak_dependency", nil)
+ case 15:
+ b = p.appendRepeatedField(b, "option_dependency", nil)
case 4:
b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto)
case 5:
@@ -66,6 +68,8 @@ func (p *SourcePath) appendDescriptorProto(b []byte) []byte {
b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange)
case 10:
b = p.appendRepeatedField(b, "reserved_name", nil)
+ case 11:
+ b = p.appendSingularField(b, "visibility", nil)
}
return b
}
@@ -85,6 +89,8 @@ func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte {
b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange)
case 5:
b = p.appendRepeatedField(b, "reserved_name", nil)
+ case 6:
+ b = p.appendSingularField(b, "visibility", nil)
}
return b
}
@@ -400,6 +406,8 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte {
b = p.appendSingularField(b, "json_format", nil)
case 7:
b = p.appendSingularField(b, "enforce_naming_style", nil)
+ case 8:
+ b = p.appendSingularField(b, "default_symbol_visibility", nil)
}
return b
}
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 7fe280f194..4eacb523c3 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -151,6 +151,70 @@ func (Edition) EnumDescriptor() ([]byte, []int) {
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0}
}
+// Describes the 'visibility' of a symbol with respect to the proto import
+// system. Symbols can only be imported when the visibility rules do not prevent
+// it (ex: local symbols cannot be imported). Visibility modifiers can only set
+// on `message` and `enum` as they are the only types available to be referenced
+// from other files.
+type SymbolVisibility int32
+
+const (
+ SymbolVisibility_VISIBILITY_UNSET SymbolVisibility = 0
+ SymbolVisibility_VISIBILITY_LOCAL SymbolVisibility = 1
+ SymbolVisibility_VISIBILITY_EXPORT SymbolVisibility = 2
+)
+
+// Enum value maps for SymbolVisibility.
+var (
+ SymbolVisibility_name = map[int32]string{
+ 0: "VISIBILITY_UNSET",
+ 1: "VISIBILITY_LOCAL",
+ 2: "VISIBILITY_EXPORT",
+ }
+ SymbolVisibility_value = map[string]int32{
+ "VISIBILITY_UNSET": 0,
+ "VISIBILITY_LOCAL": 1,
+ "VISIBILITY_EXPORT": 2,
+ }
+)
+
+func (x SymbolVisibility) Enum() *SymbolVisibility {
+ p := new(SymbolVisibility)
+ *p = x
+ return p
+}
+
+func (x SymbolVisibility) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+}
+
+func (SymbolVisibility) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[1]
+}
+
+func (x SymbolVisibility) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *SymbolVisibility) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = SymbolVisibility(num)
+ return nil
+}
+
+// Deprecated: Use SymbolVisibility.Descriptor instead.
+func (SymbolVisibility) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1}
+}
+
// The verification state of the extension range.
type ExtensionRangeOptions_VerificationState int32
@@ -183,11 +247,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string {
}
func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
}
func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[1]
+ return &file_google_protobuf_descriptor_proto_enumTypes[2]
}
func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber {
@@ -299,11 +363,11 @@ func (x FieldDescriptorProto_Type) String() string {
}
func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
}
func (FieldDescriptorProto_Type) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[2]
+ return &file_google_protobuf_descriptor_proto_enumTypes[3]
}
func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber {
@@ -362,11 +426,11 @@ func (x FieldDescriptorProto_Label) String() string {
}
func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
}
func (FieldDescriptorProto_Label) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[3]
+ return &file_google_protobuf_descriptor_proto_enumTypes[4]
}
func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber {
@@ -423,11 +487,11 @@ func (x FileOptions_OptimizeMode) String() string {
}
func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
}
func (FileOptions_OptimizeMode) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[4]
+ return &file_google_protobuf_descriptor_proto_enumTypes[5]
}
func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber {
@@ -489,11 +553,11 @@ func (x FieldOptions_CType) String() string {
}
func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
}
func (FieldOptions_CType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[5]
+ return &file_google_protobuf_descriptor_proto_enumTypes[6]
}
func (x FieldOptions_CType) Number() protoreflect.EnumNumber {
@@ -551,11 +615,11 @@ func (x FieldOptions_JSType) String() string {
}
func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
}
func (FieldOptions_JSType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[6]
+ return &file_google_protobuf_descriptor_proto_enumTypes[7]
}
func (x FieldOptions_JSType) Number() protoreflect.EnumNumber {
@@ -611,11 +675,11 @@ func (x FieldOptions_OptionRetention) String() string {
}
func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
}
func (FieldOptions_OptionRetention) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[7]
+ return &file_google_protobuf_descriptor_proto_enumTypes[8]
}
func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber {
@@ -694,11 +758,11 @@ func (x FieldOptions_OptionTargetType) String() string {
}
func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
}
func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[8]
+ return &file_google_protobuf_descriptor_proto_enumTypes[9]
}
func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber {
@@ -756,11 +820,11 @@ func (x MethodOptions_IdempotencyLevel) String() string {
}
func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
}
func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[9]
+ return &file_google_protobuf_descriptor_proto_enumTypes[10]
}
func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber {
@@ -818,11 +882,11 @@ func (x FeatureSet_FieldPresence) String() string {
}
func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
}
func (FeatureSet_FieldPresence) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[10]
+ return &file_google_protobuf_descriptor_proto_enumTypes[11]
}
func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber {
@@ -877,11 +941,11 @@ func (x FeatureSet_EnumType) String() string {
}
func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
}
func (FeatureSet_EnumType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[11]
+ return &file_google_protobuf_descriptor_proto_enumTypes[12]
}
func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber {
@@ -936,11 +1000,11 @@ func (x FeatureSet_RepeatedFieldEncoding) String() string {
}
func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
}
func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[12]
+ return &file_google_protobuf_descriptor_proto_enumTypes[13]
}
func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber {
@@ -995,11 +1059,11 @@ func (x FeatureSet_Utf8Validation) String() string {
}
func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
}
func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[13]
+ return &file_google_protobuf_descriptor_proto_enumTypes[14]
}
func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber {
@@ -1054,11 +1118,11 @@ func (x FeatureSet_MessageEncoding) String() string {
}
func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
}
func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[14]
+ return &file_google_protobuf_descriptor_proto_enumTypes[15]
}
func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber {
@@ -1113,11 +1177,11 @@ func (x FeatureSet_JsonFormat) String() string {
}
func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
}
func (FeatureSet_JsonFormat) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[15]
+ return &file_google_protobuf_descriptor_proto_enumTypes[16]
}
func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber {
@@ -1172,11 +1236,11 @@ func (x FeatureSet_EnforceNamingStyle) String() string {
}
func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor()
}
func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[16]
+ return &file_google_protobuf_descriptor_proto_enumTypes[17]
}
func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber {
@@ -1198,6 +1262,77 @@ func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) {
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6}
}
+type FeatureSet_VisibilityFeature_DefaultSymbolVisibility int32
+
+const (
+ FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 0
+ // Default pre-EDITION_2024, all UNSET visibility are export.
+ FeatureSet_VisibilityFeature_EXPORT_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 1
+ // All top-level symbols default to export, nested default to local.
+ FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 2
+ // All symbols default to local.
+ FeatureSet_VisibilityFeature_LOCAL_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 3
+ // All symbols local by default. Nested types cannot be exported.
+ // With special case caveat for message { enum {} reserved 1 to max; }
+ // This is the recommended setting for new protos.
+ FeatureSet_VisibilityFeature_STRICT FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 4
+)
+
+// Enum value maps for FeatureSet_VisibilityFeature_DefaultSymbolVisibility.
+var (
+ FeatureSet_VisibilityFeature_DefaultSymbolVisibility_name = map[int32]string{
+ 0: "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN",
+ 1: "EXPORT_ALL",
+ 2: "EXPORT_TOP_LEVEL",
+ 3: "LOCAL_ALL",
+ 4: "STRICT",
+ }
+ FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value = map[string]int32{
+ "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN": 0,
+ "EXPORT_ALL": 1,
+ "EXPORT_TOP_LEVEL": 2,
+ "LOCAL_ALL": 3,
+ "STRICT": 4,
+ }
+)
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Enum() *FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+ p := new(FeatureSet_VisibilityFeature_DefaultSymbolVisibility)
+ *p = x
+ return p
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[18].Descriptor()
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[18]
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_VisibilityFeature_DefaultSymbolVisibility) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FeatureSet_VisibilityFeature_DefaultSymbolVisibility(num)
+ return nil
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature_DefaultSymbolVisibility.Descriptor instead.
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0, 0}
+}
+
// Represents the identified object's effect on the element in the original
// .proto file.
type GeneratedCodeInfo_Annotation_Semantic int32
@@ -1236,11 +1371,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string {
}
func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[19].Descriptor()
}
func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[17]
+ return &file_google_protobuf_descriptor_proto_enumTypes[19]
}
func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber {
@@ -1321,6 +1456,9 @@ type FileDescriptorProto struct {
// Indexes of the weak imported files in the dependency list.
// For Google-internal migration only. Do not use.
WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+ // Names of files imported by this file purely for the purpose of providing
+ // option extensions. These are excluded from the dependency list above.
+ OptionDependency []string `protobuf:"bytes,15,rep,name=option_dependency,json=optionDependency" json:"option_dependency,omitempty"`
// All top-level definitions in this file.
MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
@@ -1414,6 +1552,13 @@ func (x *FileDescriptorProto) GetWeakDependency() []int32 {
return nil
}
+func (x *FileDescriptorProto) GetOptionDependency() []string {
+ if x != nil {
+ return x.OptionDependency
+ }
+ return nil
+}
+
func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto {
if x != nil {
return x.MessageType
@@ -1484,7 +1629,9 @@ type DescriptorProto struct {
ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
// Reserved field names, which may not be used by fields in the same message.
// A given name may only be reserved once.
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ // Support for `export` and `local` keywords on enums.
+ Visibility *SymbolVisibility `protobuf:"varint,11,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -1589,6 +1736,13 @@ func (x *DescriptorProto) GetReservedName() []string {
return nil
}
+func (x *DescriptorProto) GetVisibility() SymbolVisibility {
+ if x != nil && x.Visibility != nil {
+ return *x.Visibility
+ }
+ return SymbolVisibility_VISIBILITY_UNSET
+}
+
type ExtensionRangeOptions struct {
state protoimpl.MessageState `protogen:"open.v1"`
// The parser stores options it doesn't recognize here. See above.
@@ -1901,7 +2055,9 @@ type EnumDescriptorProto struct {
ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
// Reserved enum value names, which may not be reused. A given name may only
// be reserved once.
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ // Support for `export` and `local` keywords on enums.
+ Visibility *SymbolVisibility `protobuf:"varint,6,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -1971,6 +2127,13 @@ func (x *EnumDescriptorProto) GetReservedName() []string {
return nil
}
+func (x *EnumDescriptorProto) GetVisibility() SymbolVisibility {
+ if x != nil && x.Visibility != nil {
+ return *x.Visibility
+ }
+ return SymbolVisibility_VISIBILITY_UNSET
+}
+
// Describes a value within an enum.
type EnumValueDescriptorProto struct {
state protoimpl.MessageState `protogen:"open.v1"`
@@ -2710,7 +2873,10 @@ type FieldOptions struct {
// for accessors, or it will be completely ignored; in the very least, this
// is a formalization for deprecating fields.
Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // DEPRECATED. DO NOT USE!
// For Google-internal migration only. Do not use.
+ //
+ // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
// Indicate that the field value should not be printed out when using debug
// formats, e.g. when the field contains sensitive credentials.
@@ -2814,6 +2980,7 @@ func (x *FieldOptions) GetDeprecated() bool {
return Default_FieldOptions_Deprecated
}
+// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
func (x *FieldOptions) GetWeak() bool {
if x != nil && x.Weak != nil {
return *x.Weak
@@ -3392,17 +3559,18 @@ func (x *UninterpretedOption) GetAggregateValue() string {
// be designed and implemented to handle this, hopefully before we ever hit a
// conflict here.
type FeatureSet struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
- EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
- RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
- Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
- MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
- JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
- EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"`
- extensionFields protoimpl.ExtensionFields
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
+ EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
+ RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
+ Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
+ MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
+ JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
+ EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"`
+ DefaultSymbolVisibility *FeatureSet_VisibilityFeature_DefaultSymbolVisibility `protobuf:"varint,8,opt,name=default_symbol_visibility,json=defaultSymbolVisibility,enum=google.protobuf.FeatureSet_VisibilityFeature_DefaultSymbolVisibility" json:"default_symbol_visibility,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FeatureSet) Reset() {
@@ -3484,6 +3652,13 @@ func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle {
return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN
}
+func (x *FeatureSet) GetDefaultSymbolVisibility() FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+ if x != nil && x.DefaultSymbolVisibility != nil {
+ return *x.DefaultSymbolVisibility
+ }
+ return FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN
+}
+
// A compiled specification for the defaults of a set of features. These
// messages are generated from FeatureSet extensions and can be used to seed
// feature resolution. The resolution with this object becomes a simple search
@@ -4144,6 +4319,42 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool {
return false
}
+type FeatureSet_VisibilityFeature struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *FeatureSet_VisibilityFeature) Reset() {
+ *x = FeatureSet_VisibilityFeature{}
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *FeatureSet_VisibilityFeature) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FeatureSet_VisibilityFeature) ProtoMessage() {}
+
+func (x *FeatureSet_VisibilityFeature) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature.ProtoReflect.Descriptor instead.
+func (*FeatureSet_VisibilityFeature) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
+}
+
// A map from every known edition with a unique set of defaults to its
// defaults. Not all editions may be contained here. For a given edition,
// the defaults at the closest matching edition ordered at or before it should
@@ -4161,7 +4372,7 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct {
func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
*x = FeatureSetDefaults_FeatureSetEditionDefault{}
- mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4173,7 +4384,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4309,7 +4520,7 @@ type SourceCodeInfo_Location struct {
func (x *SourceCodeInfo_Location) Reset() {
*x = SourceCodeInfo_Location{}
- mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4321,7 +4532,7 @@ func (x *SourceCodeInfo_Location) String() string {
func (*SourceCodeInfo_Location) ProtoMessage() {}
func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4393,7 +4604,7 @@ type GeneratedCodeInfo_Annotation struct {
func (x *GeneratedCodeInfo_Annotation) Reset() {
*x = GeneratedCodeInfo_Annotation{}
- mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4405,7 +4616,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string {
func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4462,7 +4673,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
"\n" +
" google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" +
"\x11FileDescriptorSet\x128\n" +
- "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\x98\x05\n" +
+ "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xc5\x05\n" +
"\x13FileDescriptorProto\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
"\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" +
@@ -4471,7 +4682,8 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
"dependency\x12+\n" +
"\x11public_dependency\x18\n" +
" \x03(\x05R\x10publicDependency\x12'\n" +
- "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12C\n" +
+ "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12+\n" +
+ "\x11option_dependency\x18\x0f \x03(\tR\x10optionDependency\x12C\n" +
"\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" +
"\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" +
"\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" +
@@ -4479,7 +4691,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
"\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" +
"\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" +
"\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" +
- "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xb9\x06\n" +
+ "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xfc\x06\n" +
"\x0fDescriptorProto\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12;\n" +
"\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" +
@@ -4493,7 +4705,10 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
"\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" +
"\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" +
"\rreserved_name\x18\n" +
- " \x03(\tR\freservedName\x1az\n" +
+ " \x03(\tR\freservedName\x12A\n" +
+ "\n" +
+ "visibility\x18\v \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
+ "visibility\x1az\n" +
"\x0eExtensionRange\x12\x14\n" +
"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
"\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" +
@@ -4562,13 +4777,16 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
"\x0eLABEL_REQUIRED\x10\x02\"c\n" +
"\x14OneofDescriptorProto\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x127\n" +
- "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xe3\x02\n" +
+ "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xa6\x03\n" +
"\x13EnumDescriptorProto\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12?\n" +
"\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" +
"\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" +
"\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" +
- "\rreserved_name\x18\x05 \x03(\tR\freservedName\x1a;\n" +
+ "\rreserved_name\x18\x05 \x03(\tR\freservedName\x12A\n" +
+ "\n" +
+ "visibility\x18\x06 \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
+ "visibility\x1a;\n" +
"\x11EnumReservedRange\x12\x14\n" +
"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
"\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" +
@@ -4629,7 +4847,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
"&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
"\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" +
- "\"\x9d\r\n" +
+ "\"\xa1\r\n" +
"\fFieldOptions\x12A\n" +
"\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" +
"\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" +
@@ -4638,9 +4856,9 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
"\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" +
"\n" +
"deprecated\x18\x03 \x01(\b:\x05falseR\n" +
- "deprecated\x12\x19\n" +
+ "deprecated\x12\x1d\n" +
"\x04weak\x18\n" +
- " \x01(\b:\x05falseR\x04weak\x12(\n" +
+ " \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" +
"\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" +
"\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" +
"\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" +
@@ -4728,7 +4946,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
"\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" +
"\bNamePart\x12\x1b\n" +
"\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" +
- "\fis_extension\x18\x02 \x02(\bR\visExtension\"\xae\f\n" +
+ "\fis_extension\x18\x02 \x02(\bR\visExtension\"\x8e\x0f\n" +
"\n" +
"FeatureSet\x12\x91\x01\n" +
"\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" +
@@ -4739,7 +4957,18 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
"\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" +
"\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" +
"jsonFormat\x12\xab\x01\n" +
- "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\"\\\n" +
+ "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\x12\xb9\x01\n" +
+ "\x19default_symbol_visibility\x18\b \x01(\x0e2E.google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibilityB6\x88\x01\x02\x98\x01\x01\xa2\x01\x0f\x12\n" +
+ "EXPORT_ALL\x18\x84\a\xa2\x01\x15\x12\x10EXPORT_TOP_LEVEL\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x17defaultSymbolVisibility\x1a\xa1\x01\n" +
+ "\x11VisibilityFeature\"\x81\x01\n" +
+ "\x17DefaultSymbolVisibility\x12%\n" +
+ "!DEFAULT_SYMBOL_VISIBILITY_UNKNOWN\x10\x00\x12\x0e\n" +
+ "\n" +
+ "EXPORT_ALL\x10\x01\x12\x14\n" +
+ "\x10EXPORT_TOP_LEVEL\x10\x02\x12\r\n" +
+ "\tLOCAL_ALL\x10\x03\x12\n" +
+ "\n" +
+ "\x06STRICT\x10\x04J\b\b\x01\x10\x80\x80\x80\x80\x02\"\\\n" +
"\rFieldPresence\x12\x1a\n" +
"\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" +
"\bEXPLICIT\x10\x01\x12\f\n" +
@@ -4817,7 +5046,11 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
"\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" +
"\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" +
"\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" +
- "\vEDITION_MAX\x10\xff\xff\xff\xff\aB~\n" +
+ "\vEDITION_MAX\x10\xff\xff\xff\xff\a*U\n" +
+ "\x10SymbolVisibility\x12\x14\n" +
+ "\x10VISIBILITY_UNSET\x10\x00\x12\x14\n" +
+ "\x10VISIBILITY_LOCAL\x10\x01\x12\x15\n" +
+ "\x11VISIBILITY_EXPORT\x10\x02B~\n" +
"\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection"
var (
@@ -4832,145 +5065,151 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
return file_google_protobuf_descriptor_proto_rawDescData
}
-var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 18)
-var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33)
+var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 20)
+var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 34)
var file_google_protobuf_descriptor_proto_goTypes = []any{
- (Edition)(0), // 0: google.protobuf.Edition
- (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState
- (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type
- (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label
- (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode
- (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType
- (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType
- (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention
- (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType
- (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel
- (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence
- (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType
- (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding
- (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation
- (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding
- (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat
- (FeatureSet_EnforceNamingStyle)(0), // 16: google.protobuf.FeatureSet.EnforceNamingStyle
- (GeneratedCodeInfo_Annotation_Semantic)(0), // 17: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
- (*FileDescriptorSet)(nil), // 18: google.protobuf.FileDescriptorSet
- (*FileDescriptorProto)(nil), // 19: google.protobuf.FileDescriptorProto
- (*DescriptorProto)(nil), // 20: google.protobuf.DescriptorProto
- (*ExtensionRangeOptions)(nil), // 21: google.protobuf.ExtensionRangeOptions
- (*FieldDescriptorProto)(nil), // 22: google.protobuf.FieldDescriptorProto
- (*OneofDescriptorProto)(nil), // 23: google.protobuf.OneofDescriptorProto
- (*EnumDescriptorProto)(nil), // 24: google.protobuf.EnumDescriptorProto
- (*EnumValueDescriptorProto)(nil), // 25: google.protobuf.EnumValueDescriptorProto
- (*ServiceDescriptorProto)(nil), // 26: google.protobuf.ServiceDescriptorProto
- (*MethodDescriptorProto)(nil), // 27: google.protobuf.MethodDescriptorProto
- (*FileOptions)(nil), // 28: google.protobuf.FileOptions
- (*MessageOptions)(nil), // 29: google.protobuf.MessageOptions
- (*FieldOptions)(nil), // 30: google.protobuf.FieldOptions
- (*OneofOptions)(nil), // 31: google.protobuf.OneofOptions
- (*EnumOptions)(nil), // 32: google.protobuf.EnumOptions
- (*EnumValueOptions)(nil), // 33: google.protobuf.EnumValueOptions
- (*ServiceOptions)(nil), // 34: google.protobuf.ServiceOptions
- (*MethodOptions)(nil), // 35: google.protobuf.MethodOptions
- (*UninterpretedOption)(nil), // 36: google.protobuf.UninterpretedOption
- (*FeatureSet)(nil), // 37: google.protobuf.FeatureSet
- (*FeatureSetDefaults)(nil), // 38: google.protobuf.FeatureSetDefaults
- (*SourceCodeInfo)(nil), // 39: google.protobuf.SourceCodeInfo
- (*GeneratedCodeInfo)(nil), // 40: google.protobuf.GeneratedCodeInfo
- (*DescriptorProto_ExtensionRange)(nil), // 41: google.protobuf.DescriptorProto.ExtensionRange
- (*DescriptorProto_ReservedRange)(nil), // 42: google.protobuf.DescriptorProto.ReservedRange
- (*ExtensionRangeOptions_Declaration)(nil), // 43: google.protobuf.ExtensionRangeOptions.Declaration
- (*EnumDescriptorProto_EnumReservedRange)(nil), // 44: google.protobuf.EnumDescriptorProto.EnumReservedRange
- (*FieldOptions_EditionDefault)(nil), // 45: google.protobuf.FieldOptions.EditionDefault
- (*FieldOptions_FeatureSupport)(nil), // 46: google.protobuf.FieldOptions.FeatureSupport
- (*UninterpretedOption_NamePart)(nil), // 47: google.protobuf.UninterpretedOption.NamePart
- (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 48: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
- (*SourceCodeInfo_Location)(nil), // 49: google.protobuf.SourceCodeInfo.Location
- (*GeneratedCodeInfo_Annotation)(nil), // 50: google.protobuf.GeneratedCodeInfo.Annotation
+ (Edition)(0), // 0: google.protobuf.Edition
+ (SymbolVisibility)(0), // 1: google.protobuf.SymbolVisibility
+ (ExtensionRangeOptions_VerificationState)(0), // 2: google.protobuf.ExtensionRangeOptions.VerificationState
+ (FieldDescriptorProto_Type)(0), // 3: google.protobuf.FieldDescriptorProto.Type
+ (FieldDescriptorProto_Label)(0), // 4: google.protobuf.FieldDescriptorProto.Label
+ (FileOptions_OptimizeMode)(0), // 5: google.protobuf.FileOptions.OptimizeMode
+ (FieldOptions_CType)(0), // 6: google.protobuf.FieldOptions.CType
+ (FieldOptions_JSType)(0), // 7: google.protobuf.FieldOptions.JSType
+ (FieldOptions_OptionRetention)(0), // 8: google.protobuf.FieldOptions.OptionRetention
+ (FieldOptions_OptionTargetType)(0), // 9: google.protobuf.FieldOptions.OptionTargetType
+ (MethodOptions_IdempotencyLevel)(0), // 10: google.protobuf.MethodOptions.IdempotencyLevel
+ (FeatureSet_FieldPresence)(0), // 11: google.protobuf.FeatureSet.FieldPresence
+ (FeatureSet_EnumType)(0), // 12: google.protobuf.FeatureSet.EnumType
+ (FeatureSet_RepeatedFieldEncoding)(0), // 13: google.protobuf.FeatureSet.RepeatedFieldEncoding
+ (FeatureSet_Utf8Validation)(0), // 14: google.protobuf.FeatureSet.Utf8Validation
+ (FeatureSet_MessageEncoding)(0), // 15: google.protobuf.FeatureSet.MessageEncoding
+ (FeatureSet_JsonFormat)(0), // 16: google.protobuf.FeatureSet.JsonFormat
+ (FeatureSet_EnforceNamingStyle)(0), // 17: google.protobuf.FeatureSet.EnforceNamingStyle
+ (FeatureSet_VisibilityFeature_DefaultSymbolVisibility)(0), // 18: google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
+ (GeneratedCodeInfo_Annotation_Semantic)(0), // 19: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ (*FileDescriptorSet)(nil), // 20: google.protobuf.FileDescriptorSet
+ (*FileDescriptorProto)(nil), // 21: google.protobuf.FileDescriptorProto
+ (*DescriptorProto)(nil), // 22: google.protobuf.DescriptorProto
+ (*ExtensionRangeOptions)(nil), // 23: google.protobuf.ExtensionRangeOptions
+ (*FieldDescriptorProto)(nil), // 24: google.protobuf.FieldDescriptorProto
+ (*OneofDescriptorProto)(nil), // 25: google.protobuf.OneofDescriptorProto
+ (*EnumDescriptorProto)(nil), // 26: google.protobuf.EnumDescriptorProto
+ (*EnumValueDescriptorProto)(nil), // 27: google.protobuf.EnumValueDescriptorProto
+ (*ServiceDescriptorProto)(nil), // 28: google.protobuf.ServiceDescriptorProto
+ (*MethodDescriptorProto)(nil), // 29: google.protobuf.MethodDescriptorProto
+ (*FileOptions)(nil), // 30: google.protobuf.FileOptions
+ (*MessageOptions)(nil), // 31: google.protobuf.MessageOptions
+ (*FieldOptions)(nil), // 32: google.protobuf.FieldOptions
+ (*OneofOptions)(nil), // 33: google.protobuf.OneofOptions
+ (*EnumOptions)(nil), // 34: google.protobuf.EnumOptions
+ (*EnumValueOptions)(nil), // 35: google.protobuf.EnumValueOptions
+ (*ServiceOptions)(nil), // 36: google.protobuf.ServiceOptions
+ (*MethodOptions)(nil), // 37: google.protobuf.MethodOptions
+ (*UninterpretedOption)(nil), // 38: google.protobuf.UninterpretedOption
+ (*FeatureSet)(nil), // 39: google.protobuf.FeatureSet
+ (*FeatureSetDefaults)(nil), // 40: google.protobuf.FeatureSetDefaults
+ (*SourceCodeInfo)(nil), // 41: google.protobuf.SourceCodeInfo
+ (*GeneratedCodeInfo)(nil), // 42: google.protobuf.GeneratedCodeInfo
+ (*DescriptorProto_ExtensionRange)(nil), // 43: google.protobuf.DescriptorProto.ExtensionRange
+ (*DescriptorProto_ReservedRange)(nil), // 44: google.protobuf.DescriptorProto.ReservedRange
+ (*ExtensionRangeOptions_Declaration)(nil), // 45: google.protobuf.ExtensionRangeOptions.Declaration
+ (*EnumDescriptorProto_EnumReservedRange)(nil), // 46: google.protobuf.EnumDescriptorProto.EnumReservedRange
+ (*FieldOptions_EditionDefault)(nil), // 47: google.protobuf.FieldOptions.EditionDefault
+ (*FieldOptions_FeatureSupport)(nil), // 48: google.protobuf.FieldOptions.FeatureSupport
+ (*UninterpretedOption_NamePart)(nil), // 49: google.protobuf.UninterpretedOption.NamePart
+ (*FeatureSet_VisibilityFeature)(nil), // 50: google.protobuf.FeatureSet.VisibilityFeature
+ (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 51: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+ (*SourceCodeInfo_Location)(nil), // 52: google.protobuf.SourceCodeInfo.Location
+ (*GeneratedCodeInfo_Annotation)(nil), // 53: google.protobuf.GeneratedCodeInfo.Annotation
}
var file_google_protobuf_descriptor_proto_depIdxs = []int32{
- 19, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
- 20, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
- 24, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
- 26, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
- 22, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
- 28, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
- 39, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
+ 21, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
+ 22, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
+ 26, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 28, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
+ 24, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 30, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
+ 41, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition
- 22, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
- 22, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
- 20, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
- 24, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
- 41, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
- 23, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
- 29, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
- 42, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
- 36, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 43, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
- 37, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
- 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
- 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
- 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
- 30, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
- 31, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
- 25, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
- 32, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
- 44, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
- 33, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
- 27, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
- 34, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
- 35, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
- 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
- 37, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
- 36, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 37, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
- 36, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
- 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
- 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
- 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
- 45, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
- 37, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
- 46, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
- 36, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 37, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
- 36, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 37, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
- 36, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 37, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
- 46, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
- 36, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 37, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
- 36, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
- 37, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
- 36, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 47, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
- 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
- 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
- 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
- 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
- 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
- 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
- 16, // 63: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle
- 48, // 64: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
- 0, // 65: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
- 0, // 66: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
- 49, // 67: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
- 50, // 68: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
- 21, // 69: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
- 0, // 70: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
- 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
- 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
- 0, // 73: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
- 0, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
- 37, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
- 37, // 76: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
- 17, // 77: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
- 78, // [78:78] is the sub-list for method output_type
- 78, // [78:78] is the sub-list for method input_type
- 78, // [78:78] is the sub-list for extension type_name
- 78, // [78:78] is the sub-list for extension extendee
- 0, // [0:78] is the sub-list for field type_name
+ 24, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
+ 24, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 22, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
+ 26, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 43, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
+ 25, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
+ 31, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
+ 44, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
+ 1, // 16: google.protobuf.DescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
+ 38, // 17: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 45, // 18: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
+ 39, // 19: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
+ 2, // 20: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
+ 4, // 21: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
+ 3, // 22: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
+ 32, // 23: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
+ 33, // 24: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
+ 27, // 25: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
+ 34, // 26: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
+ 46, // 27: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
+ 1, // 28: google.protobuf.EnumDescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
+ 35, // 29: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
+ 29, // 30: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
+ 36, // 31: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
+ 37, // 32: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
+ 5, // 33: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
+ 39, // 34: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 35: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 36: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 37: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 6, // 38: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
+ 7, // 39: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
+ 8, // 40: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
+ 9, // 41: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
+ 47, // 42: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
+ 39, // 43: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
+ 48, // 44: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+ 38, // 45: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 46: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 47: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 48: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 49: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 50: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
+ 48, // 51: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+ 38, // 52: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 53: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 54: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 10, // 55: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+ 39, // 56: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 57: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 49, // 58: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+ 11, // 59: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
+ 12, // 60: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
+ 13, // 61: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
+ 14, // 62: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
+ 15, // 63: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
+ 16, // 64: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
+ 17, // 65: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle
+ 18, // 66: google.protobuf.FeatureSet.default_symbol_visibility:type_name -> google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
+ 51, // 67: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+ 0, // 68: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
+ 0, // 69: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
+ 52, // 70: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+ 53, // 71: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+ 23, // 72: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+ 0, // 73: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
+ 0, // 74: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
+ 0, // 75: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
+ 0, // 76: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
+ 0, // 77: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
+ 39, // 78: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
+ 39, // 79: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
+ 19, // 80: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ 81, // [81:81] is the sub-list for method output_type
+ 81, // [81:81] is the sub-list for method input_type
+ 81, // [81:81] is the sub-list for extension type_name
+ 81, // [81:81] is the sub-list for extension extendee
+ 0, // [0:81] is the sub-list for field type_name
}
func init() { file_google_protobuf_descriptor_proto_init() }
@@ -4983,8 +5222,8 @@ func file_google_protobuf_descriptor_proto_init() {
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)),
- NumEnums: 18,
- NumMessages: 33,
+ NumEnums: 20,
+ NumMessages: 34,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
index 167baf680d..58751ed0ec 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
@@ -15,4 +15,4 @@ limitations under the License.
*/
// Package errors provides detailed error types for api field validation.
-package errors // import "k8s.io/apimachinery/pkg/api/errors"
+package errors
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
index 57e0e71f67..7b57a9eb6c 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
@@ -54,6 +54,7 @@ var knownReasons = map[metav1.StatusReason]struct{}{
metav1.StatusReasonGone: {},
metav1.StatusReasonInvalid: {},
metav1.StatusReasonServerTimeout: {},
+ metav1.StatusReasonStoreReadError: {},
metav1.StatusReasonTimeout: {},
metav1.StatusReasonTooManyRequests: {},
metav1.StatusReasonBadRequest: {},
@@ -257,7 +258,8 @@ func NewApplyConflict(causes []metav1.StatusCause, message string) *StatusError
}
// NewGone returns an error indicating the item no longer available at the server and no forwarding address is known.
-// DEPRECATED: Please use NewResourceExpired instead.
+//
+// Deprecated: Please use NewResourceExpired instead.
func NewGone(message string) *StatusError {
return &StatusError{metav1.Status{
Status: metav1.StatusFailure,
@@ -437,7 +439,7 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr
message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code)
switch code {
case http.StatusConflict:
- if verb == "POST" {
+ if verb == http.MethodPost {
reason = metav1.StatusReasonAlreadyExists
} else {
reason = metav1.StatusReasonConflict
@@ -775,6 +777,12 @@ func IsUnexpectedObjectError(err error) bool {
return err != nil && (ok || errors.As(err, &uoe))
}
+// IsStoreReadError determines if err is due to either failure to transform the
+// data from the storage, or failure to decode the object appropriately.
+func IsStoreReadError(err error) bool {
+ return ReasonForError(err) == metav1.StatusReasonStoreReadError
+}
+
// SuggestsClientDelay returns true if this error suggests a client delay as well as the
// suggested seconds to wait, or false if the error does not imply a wait. It does not
// address whether the error *should* be retried, since some errors (like a 3xx) may
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
index 1e1330fff2..3bd8bf535e 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
@@ -10,5 +10,6 @@ reviewers:
- mikedanese
- liggitt
- janetkuo
- - ncdc
- dims
+emeritus_reviewers:
+ - ncdc
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go
index b6d42acf8f..a3b18a5c9a 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
// Package meta provides functions for retrieving API metadata from objects
// belonging to the Kubernetes API
-package meta // import "k8s.io/apimachinery/pkg/api/meta"
+package meta
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go
index 1fdd32c4ba..468afd0e9e 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go
@@ -221,6 +221,9 @@ func extractList(obj runtime.Object, allocNew bool) ([]runtime.Object, error) {
if err != nil {
return nil, err
}
+ if items.IsNil() {
+ return nil, nil
+ }
list := make([]runtime.Object, items.Len())
if len(list) == 0 {
return list, nil
diff --git a/vendor/k8s.io/apimachinery/pkg/api/operation/operation.go b/vendor/k8s.io/apimachinery/pkg/api/operation/operation.go
new file mode 100644
index 0000000000..86b6ddf37c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/operation/operation.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operation
+
+import (
+ "slices"
+ "strings"
+)
+
+// Operation provides contextual information about a validation request and the API
+// operation being validated.
+// This type is intended for use with generate validation code and may be enhanced
+// in the future to include other information needed to validate requests.
+type Operation struct {
+ // Type is the category of operation being validated. This does not
+ // differentiate between HTTP verbs like PUT and PATCH, but rather merges
+ // those into a single "Update" category.
+ Type Type
+
+ // Options declare the options enabled for validation.
+ //
+ // Options should be set according to a resource validation strategy before validation
+ // is performed, and must be treated as read-only during validation.
+ //
+ // Options are identified by string names. Option string names may match the name of a feature
+ // gate, in which case the presence of the name in the set indicates that the feature is
+ // considered enabled for the resource being validated. Note that a resource may have a
+ // feature enabled even when the feature gate is disabled. This can happen when feature is
+ // already in-use by a resource, often because the feature gate was enabled when the
+ // resource first began using the feature.
+ //
+ // Unset options are disabled/false.
+ Options []string
+
+ // Request provides information about the request being validated.
+ Request Request
+}
+
+// HasOption returns true if the given string is in the Options slice.
+func (o Operation) HasOption(option string) bool {
+ return slices.Contains(o.Options, option)
+}
+
+// Request provides information about the request being validated.
+type Request struct {
+ // Subresources identifies the subresource path components of the request. For
+ // example, Subresources for a request to `/api/v1/pods/my-pod/status` would be
+ // `["status"]`. For `/api/v1/widget/my-widget/x/y/z`, it would be `["x", "y",
+ // "z"]`. For a root resource (`/api/v1/pods/my-pod`), Subresources will be an
+ // empty slice.
+ //
+ // Validation logic should only consult this field if the validation rules for a
+ // particular field differ depending on whether the main resource or a specific
+ // subresource is being accessed. For example:
+ //
+ // Updates to a Pod resource (`/`) normally cannot change container resource
+ // requests/limits after the Pod is created (they are immutable). However, when
+ // accessing the Pod's "resize" subresource (`/resize`), these specific fields
+ // are allowed to be modified. In this scenario, the validation logic for
+ // `spec.container[*].resources` must check `Subresources` to permit changes only
+ // when the request targets the "resize" subresource.
+ //
+ // Note: This field should not be used to control which fields a subresource
+ // operation is allowed to write. This is the responsibility of "field wiping".
+ // Field wiping logic is expected to be handled in resource strategies by
+ // modifying the incoming object before it is validated.
+ Subresources []string
+}
+
+// SubresourcePath returns the path is a slash-separated list of subresource
+// names. For example, `/status`, `/resize`, or `/x/y/z`.
+func (r Request) SubresourcePath() string {
+ if len(r.Subresources) == 0 {
+ return "/"
+ }
+ return "/" + strings.Join(r.Subresources, "/")
+}
+
+// Code is the request operation to be validated.
+type Type uint32
+
+const (
+ // Create indicates the request being validated is for a resource create operation.
+ Create Type = iota
+
+ // Update indicates the request being validated is for a resource update operation.
+ Update
+)
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
index 53a25d3449..9e1a5c0e1f 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
@@ -15,100 +15,10 @@ limitations under the License.
*/
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
+// source: k8s.io/apimachinery/pkg/api/resource/generated.proto
package resource
-import (
- fmt "fmt"
+func (m *Quantity) Reset() { *m = Quantity{} }
- math "math"
-
- proto "github.com/gogo/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *Quantity) Reset() { *m = Quantity{} }
-func (*Quantity) ProtoMessage() {}
-func (*Quantity) Descriptor() ([]byte, []int) {
- return fileDescriptor_612bba87bd70906c, []int{0}
-}
-func (m *Quantity) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Quantity.Unmarshal(m, b)
-}
-func (m *Quantity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Quantity.Marshal(b, m, deterministic)
-}
-func (m *Quantity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Quantity.Merge(m, src)
-}
-func (m *Quantity) XXX_Size() int {
- return xxx_messageInfo_Quantity.Size(m)
-}
-func (m *Quantity) XXX_DiscardUnknown() {
- xxx_messageInfo_Quantity.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Quantity proto.InternalMessageInfo
-
-func (m *QuantityValue) Reset() { *m = QuantityValue{} }
-func (*QuantityValue) ProtoMessage() {}
-func (*QuantityValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_612bba87bd70906c, []int{1}
-}
-func (m *QuantityValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_QuantityValue.Unmarshal(m, b)
-}
-func (m *QuantityValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_QuantityValue.Marshal(b, m, deterministic)
-}
-func (m *QuantityValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QuantityValue.Merge(m, src)
-}
-func (m *QuantityValue) XXX_Size() int {
- return xxx_messageInfo_QuantityValue.Size(m)
-}
-func (m *QuantityValue) XXX_DiscardUnknown() {
- xxx_messageInfo_QuantityValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QuantityValue proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity")
- proto.RegisterType((*QuantityValue)(nil), "k8s.io.apimachinery.pkg.api.resource.QuantityValue")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_612bba87bd70906c)
-}
-
-var fileDescriptor_612bba87bd70906c = []byte{
- // 254 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xf2, 0xcd, 0xb6, 0x28, 0xd6,
- 0xcb, 0xcc, 0xd7, 0xcf, 0x2e, 0x4d, 0x4a, 0x2d, 0xca, 0x4b, 0x2d, 0x49, 0x2d, 0xd6, 0x2f, 0x4b,
- 0xcd, 0x4b, 0xc9, 0x2f, 0xd2, 0x87, 0x4a, 0x24, 0x16, 0x64, 0xe6, 0x26, 0x26, 0x67, 0x64, 0xe6,
- 0xa5, 0x16, 0x55, 0xea, 0x17, 0x64, 0xa7, 0x83, 0x04, 0xf4, 0x8b, 0x52, 0x8b, 0xf3, 0x4b, 0x8b,
- 0x92, 0x53, 0xf5, 0xd3, 0x53, 0xf3, 0x52, 0x8b, 0x12, 0x4b, 0x52, 0x53, 0xf4, 0x0a, 0x8a, 0xf2,
- 0x4b, 0xf2, 0x85, 0x54, 0x20, 0xba, 0xf4, 0x90, 0x75, 0xe9, 0x15, 0x64, 0xa7, 0x83, 0x04, 0xf4,
- 0x60, 0xba, 0xa4, 0x74, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3,
- 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, 0x9a, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x18,
- 0xaa, 0x64, 0xc1, 0xc5, 0x11, 0x58, 0x9a, 0x98, 0x57, 0x92, 0x59, 0x52, 0x29, 0x24, 0xc6, 0xc5,
- 0x56, 0x5c, 0x52, 0x94, 0x99, 0x97, 0x2e, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0xe5, 0x59,
- 0x89, 0xcc, 0x58, 0x20, 0xcf, 0xd0, 0xb1, 0x50, 0x9e, 0x61, 0xc2, 0x42, 0x79, 0x86, 0x05, 0x0b,
- 0xe5, 0x19, 0x1a, 0xee, 0x28, 0x30, 0x28, 0xd9, 0x72, 0xf1, 0xc2, 0x74, 0x86, 0x25, 0xe6, 0x94,
- 0xa6, 0x92, 0xa6, 0xdd, 0xc9, 0xeb, 0xc4, 0x43, 0x39, 0x86, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c,
- 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x37,
- 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, 0x94, 0x0a, 0x31, 0x21,
- 0x05, 0x08, 0x00, 0x00, 0xff, 0xff, 0x8e, 0x70, 0x98, 0xa3, 0x69, 0x01, 0x00, 0x00,
-}
+func (m *QuantityValue) Reset() { *m = QuantityValue{} }
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
index ddd0db8fbd..875ad8577a 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
@@ -93,6 +93,7 @@ option go_package = "k8s.io/apimachinery/pkg/api/resource";
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.api.resource
message Quantity {
optional string string = 1;
}
@@ -105,6 +106,7 @@ message Quantity {
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.api.resource
message QuantityValue {
optional string string = 1;
}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.protomessage.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.protomessage.pb.go
new file mode 100644
index 0000000000..712e155c3e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.protomessage.pb.go
@@ -0,0 +1,26 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package resource
+
+func (*Quantity) ProtoMessage() {}
+
+func (*QuantityValue) ProtoMessage() {}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
index 69f1bc336d..f3cd600604 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -20,11 +20,13 @@ import (
"bytes"
"errors"
"fmt"
- "math"
+ math "math"
"math/big"
"strconv"
"strings"
+ cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
+
inf "gopkg.in/inf.v0"
)
@@ -97,6 +99,7 @@ import (
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.api.resource
type Quantity struct {
// i is the quantity in int64 scaled form, if d.Dec == nil
i int64Amount
@@ -458,9 +461,10 @@ func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
}
}
-// AsApproximateFloat64 returns a float64 representation of the quantity which may
-// lose precision. If the value of the quantity is outside the range of a float64
-// +Inf/-Inf will be returned.
+// AsApproximateFloat64 returns a float64 representation of the quantity which
+// may lose precision. If precision matter more than performance, see
+// AsFloat64Slow. If the value of the quantity is outside the range of a
+// float64 +Inf/-Inf will be returned.
func (q *Quantity) AsApproximateFloat64() float64 {
var base float64
var exponent int
@@ -478,6 +482,36 @@ func (q *Quantity) AsApproximateFloat64() float64 {
return base * math.Pow10(exponent)
}
+// AsFloat64Slow returns a float64 representation of the quantity. This is
+// more precise than AsApproximateFloat64 but significantly slower. If the
+// value of the quantity is outside the range of a float64 +Inf/-Inf will be
+// returned.
+func (q *Quantity) AsFloat64Slow() float64 {
+ infDec := q.AsDec()
+
+ var absScale int64
+ if infDec.Scale() < 0 {
+ absScale = int64(-infDec.Scale())
+ } else {
+ absScale = int64(infDec.Scale())
+ }
+ pow10AbsScale := big.NewInt(10)
+ pow10AbsScale = pow10AbsScale.Exp(pow10AbsScale, big.NewInt(absScale), nil)
+
+ var resultBigFloat *big.Float
+ if infDec.Scale() < 0 {
+ resultBigInt := new(big.Int).Mul(infDec.UnscaledBig(), pow10AbsScale)
+ resultBigFloat = new(big.Float).SetInt(resultBigInt)
+ } else {
+ pow10AbsScaleFloat := new(big.Float).SetInt(pow10AbsScale)
+ resultBigFloat = new(big.Float).SetInt(infDec.UnscaledBig())
+ resultBigFloat = resultBigFloat.Quo(resultBigFloat, pow10AbsScaleFloat)
+ }
+
+ result, _ := resultBigFloat.Float64()
+ return result
+}
+
// AsInt64 returns a representation of the current value as an int64 if a fast conversion
// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
func (q *Quantity) AsInt64() (int64, bool) {
@@ -683,6 +717,12 @@ func (q Quantity) MarshalJSON() ([]byte, error) {
return result, nil
}
+func (q Quantity) MarshalCBOR() ([]byte, error) {
+ // The call to String() should never return the string "" because the receiver's
+ // address will never be nil.
+ return cbor.Marshal(q.String())
+}
+
// ToUnstructured implements the value.UnstructuredConverter interface.
func (q Quantity) ToUnstructured() interface{} {
return q.String()
@@ -711,6 +751,27 @@ func (q *Quantity) UnmarshalJSON(value []byte) error {
return nil
}
+func (q *Quantity) UnmarshalCBOR(value []byte) error {
+ var s *string
+ if err := cbor.Unmarshal(value, &s); err != nil {
+ return err
+ }
+
+ if s == nil {
+ q.d.Dec = nil
+ q.i = int64Amount{}
+ return nil
+ }
+
+ parsed, err := ParseQuantity(strings.TrimSpace(*s))
+ if err != nil {
+ return err
+ }
+
+ *q = parsed
+ return nil
+}
+
// NewDecimalQuantity returns a new Quantity representing the given
// value in the given format.
func NewDecimalQuantity(b inf.Dec, format Format) *Quantity {
@@ -798,6 +859,7 @@ func (q *Quantity) SetScaled(value int64, scale Scale) {
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.api.resource
type QuantityValue struct {
Quantity
}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go
index 3e0cdb10d4..364ec80da2 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go
@@ -20,12 +20,8 @@ import (
"fmt"
"io"
"math/bits"
-
- "github.com/gogo/protobuf/proto"
)
-var _ proto.Sizer = &Quantity{}
-
func (m *Quantity) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.model_name.go b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.model_name.go
new file mode 100644
index 0000000000..2575a2e8c8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.model_name.go
@@ -0,0 +1,32 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package resource
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Quantity) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.api.resource.Quantity"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in QuantityValue) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.api.resource.QuantityValue"
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validate/constraints/constraints.go b/vendor/k8s.io/apimachinery/pkg/api/validate/constraints/constraints.go
new file mode 100644
index 0000000000..1689d3c079
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validate/constraints/constraints.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package constraints
+
+// Signed is a constraint that permits any signed integer type.
+type Signed interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+// Unsigned is a constraint that permits any unsigned integer type.
+type Unsigned interface {
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+// Integer is a constraint that permits any integer type.
+type Integer interface {
+ Signed | Unsigned
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validate/content/decimal_int.go b/vendor/k8s.io/apimachinery/pkg/api/validate/content/decimal_int.go
new file mode 100644
index 0000000000..5622ca15a8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validate/content/decimal_int.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+const decimalIntegerErrMsg string = "must be a valid decimal integer in canonical form"
+
+// IsDecimalInteger validates that a string represents a decimal integer in strict canonical form.
+// This means the string must be formatted exactly as a human would naturally write an integer,
+// without any programming language conventions like leading zeros, plus signs, or alternate bases.
+//
+// valid values:"0" or Non-zero integers (i.e., "123", "-456") where the first digit is 1-9,
+// followed by any digits 0-9.
+//
+// This validator is stricter than strconv.ParseInt, which accepts leading zeros values (i.e, "0700")
+// and interprets them as decimal 700, potentially causing confusion with octal notation.
+func IsDecimalInteger(value string) []string {
+ n := len(value)
+ if n == 0 {
+ return []string{EmptyError()}
+ }
+
+ i := 0
+ if value[0] == '-' {
+ if n == 1 {
+ return []string{decimalIntegerErrMsg}
+ }
+ i = 1
+ }
+
+ if value[i] == '0' {
+ if n == 1 && i == 0 {
+ return nil
+ }
+ return []string{decimalIntegerErrMsg}
+ }
+
+ if value[i] < '1' || value[i] > '9' {
+ return []string{decimalIntegerErrMsg}
+ }
+
+ for i++; i < n; i++ {
+ if value[i] < '0' || value[i] > '9' {
+ return []string{decimalIntegerErrMsg}
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validate/content/dns.go b/vendor/k8s.io/apimachinery/pkg/api/validate/content/dns.go
new file mode 100644
index 0000000000..bd20720794
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validate/content/dns.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+ "regexp"
+)
+
+const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
+
+const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character"
+
+// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123)
+const DNS1123LabelMaxLength int = 63
+
+var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$")
+
+// IsDNS1123Label tests for a string that conforms to the definition of a label in
+// DNS (RFC 1123).
+func IsDNS1123Label(value string) []string {
+ var errs []string
+ if len(value) > DNS1123LabelMaxLength {
+ errs = append(errs, MaxLenError(DNS1123LabelMaxLength))
+ }
+ if !dns1123LabelRegexp.MatchString(value) {
+ if dns1123SubdomainRegexp.MatchString(value) {
+ // It was a valid subdomain and not a valid label. Since we
+ // already checked length, it must be dots.
+ errs = append(errs, "must not contain dots")
+ } else {
+ errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc"))
+ }
+ }
+ return errs
+}
+
+const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*"
+const dns1123SubdomainFmtCaseless string = "(?i)" + dns1123SubdomainFmt
+const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"
+const dns1123SubdomainCaselessErrorMsg string = "an RFC 1123 subdomain must consist of alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"
+
+// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123)
+const DNS1123SubdomainMaxLength int = 253
+
+var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$")
+var dns1123SubdomainCaselessRegexp = regexp.MustCompile("^" + dns1123SubdomainFmtCaseless + "$")
+
+// IsDNS1123Subdomain tests for a string that conforms to the definition of a
+// subdomain in DNS (RFC 1123) lowercase.
+func IsDNS1123Subdomain(value string) []string {
+ return isDNS1123Subdomain(value, false)
+}
+
+// IsDNS1123SubdomainCaseless tests for a string that conforms to the definition of a
+// subdomain in DNS (RFC 1123).
+//
+// Deprecated: API validation should never be caseless. Caseless validation is a vector
+// for bugs and failed uniqueness assumptions. For example, names like "foo.com" and
+// "FOO.COM" are both accepted as valid, but they are typically not treated as equal by
+// consumers (e.g. CSI and DRA driver names). This fails the "least surprise" principle and
+// can cause inconsistent behaviors.
+//
+// Note: This allows uppercase names but is not caseless — uppercase and lowercase are
+// treated as different values. Use IsDNS1123Subdomain for strict, lowercase validation
+// instead.
+func IsDNS1123SubdomainCaseless(value string) []string {
+ return isDNS1123Subdomain(value, true)
+}
+
+func isDNS1123Subdomain(value string, caseless bool) []string {
+ var errs []string
+ if len(value) > DNS1123SubdomainMaxLength {
+ errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+ }
+ errorMsg := dns1123SubdomainErrorMsg
+ example := "example.com"
+ regexp := dns1123SubdomainRegexp
+ if caseless {
+ errorMsg = dns1123SubdomainCaselessErrorMsg
+ example = "Example.com"
+ regexp = dns1123SubdomainCaselessRegexp
+ }
+ if !regexp.MatchString(value) {
+ errs = append(errs, RegexError(errorMsg, dns1123SubdomainFmt, example))
+ }
+ return errs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validate/content/errors.go b/vendor/k8s.io/apimachinery/pkg/api/validate/content/errors.go
new file mode 100644
index 0000000000..a4a1b5574c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validate/content/errors.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+ "fmt"
+ "reflect"
+
+ "k8s.io/apimachinery/pkg/api/validate/constraints"
+)
+
+// MinError returns a string explanation of a "must be greater than or equal"
+// validation failure.
+func MinError[T constraints.Integer](min T) string {
+ return fmt.Sprintf("must be greater than or equal to %d", min)
+}
+
+// MaxLenError returns a string explanation of a "string too long" validation
+// failure.
+func MaxLenError(length int) string {
+ return fmt.Sprintf("must be no more than %d bytes", length)
+}
+
+// EmptyError returns a string explanation of an "empty string" validation.
+func EmptyError() string {
+ return "must be non-empty"
+}
+
+// RegexError returns a string explanation of a regex validation failure.
+func RegexError(msg string, re string, examples ...string) string {
+ if len(examples) == 0 {
+ return msg + " (regex used for validation is '" + re + "')"
+ }
+ msg += " (e.g. "
+ for i := range examples {
+ if i > 0 {
+ msg += " or "
+ }
+ msg += "'" + examples[i] + "', "
+ }
+ msg += "regex used for validation is '" + re + "')"
+ return msg
+}
+
+// NEQError returns a string explanation of a "must not be equal to" validation failure.
+func NEQError[T any](disallowed T) string {
+ format := "%v"
+ if reflect.ValueOf(disallowed).Kind() == reflect.String {
+ format = "%q"
+ }
+ return fmt.Sprintf("must not be equal to "+format, disallowed)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validate/content/identifier.go b/vendor/k8s.io/apimachinery/pkg/api/validate/content/identifier.go
new file mode 100644
index 0000000000..3913ec9916
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validate/content/identifier.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+ "regexp"
+)
+
+const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*"
+const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'"
+
+var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$")
+
+// IsCIdentifier tests for a string that conforms the definition of an identifier
+// in C. This checks the format, but not the length.
+func IsCIdentifier(value string) []string {
+ if !cIdentifierRegexp.MatchString(value) {
+ return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")}
+ }
+ return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validate/content/kube.go b/vendor/k8s.io/apimachinery/pkg/api/validate/content/kube.go
new file mode 100644
index 0000000000..44e82eefd3
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validate/content/kube.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+ "regexp"
+ "strings"
+)
+
+const labelKeyCharFmt string = "[A-Za-z0-9]"
+const labelKeyExtCharFmt string = "[-A-Za-z0-9_.]"
+const labelKeyFmt string = "(" + labelKeyCharFmt + labelKeyExtCharFmt + "*)?" + labelKeyCharFmt
+const labelKeyErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
+const labelKeyMaxLength int = 63
+
+var labelKeyRegexp = regexp.MustCompile("^" + labelKeyFmt + "$")
+
+// IsQualifiedName tests whether the value passed is what Kubernetes calls a
+// "qualified name", which is the same as a label key.
+//
+// Deprecated: use IsLabelKey instead.
+var IsQualifiedName = IsLabelKey
+
+// IsLabelKey tests whether the value passed is a valid label key. This format
+// is used to validate many fields in the Kubernetes API.
+// Label keys consist of an optional prefix and a name, separated by a '/'.
+// If the value is not valid, a list of error strings is returned. Otherwise, an
+// empty list (or nil) is returned.
+func IsLabelKey(value string) []string {
+ var errs []string
+ parts := strings.Split(value, "/")
+ var name string
+ switch len(parts) {
+ case 1:
+ name = parts[0]
+ case 2:
+ var prefix string
+ prefix, name = parts[0], parts[1]
+ if len(prefix) == 0 {
+ errs = append(errs, "prefix part "+EmptyError())
+ } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 {
+ errs = append(errs, prefixEach(msgs, "prefix part ")...)
+ }
+ default:
+ return append(errs, "a valid label key "+RegexError(labelKeyErrMsg, labelKeyFmt, "MyName", "my.name", "123-abc")+
+ " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')")
+ }
+
+ if len(name) == 0 {
+ errs = append(errs, "name part "+EmptyError())
+ } else if len(name) > labelKeyMaxLength {
+ errs = append(errs, "name part "+MaxLenError(labelKeyMaxLength))
+ }
+ if !labelKeyRegexp.MatchString(name) {
+ errs = append(errs, "name part "+RegexError(labelKeyErrMsg, labelKeyFmt, "MyName", "my.name", "123-abc"))
+ }
+ return errs
+}
+
+const labelValueFmt string = "(" + labelKeyFmt + ")?"
+const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
+
+// LabelValueMaxLength is a label's max length
+const LabelValueMaxLength int = 63
+
+var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$")
+
+// IsLabelValue tests whether the value passed is a valid label value. If
+// the value is not valid, a list of error strings is returned. Otherwise an
+// empty list (or nil) is returned.
+func IsLabelValue(value string) []string {
+ var errs []string
+ if len(value) > LabelValueMaxLength {
+ errs = append(errs, MaxLenError(LabelValueMaxLength))
+ }
+ if !labelValueRegexp.MatchString(value) {
+ errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345"))
+ }
+ return errs
+}
+
+func prefixEach(msgs []string, prefix string) []string {
+ for i := range msgs {
+ msgs[i] = prefix + msgs[i]
+ }
+ return msgs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS
new file mode 100644
index 0000000000..4023732476
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS
@@ -0,0 +1,11 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+# Disable inheritance as this is an api owners file
+options:
+ no_parent_owners: true
+approvers:
+ - api-approvers
+reviewers:
+ - api-reviewers
+labels:
+ - kind/api-change
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go
index 9f20152e45..9e305b0b18 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go
@@ -15,4 +15,4 @@ limitations under the License.
*/
// Package validation contains generic api type validation functions.
-package validation // import "k8s.io/apimachinery/pkg/api/validation"
+package validation
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
index e0b5b14900..35ea723a0f 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
@@ -33,6 +33,12 @@ const IsNegativeErrorMsg string = `must be greater than or equal to 0`
// value that were not valid. Otherwise this returns an empty list or nil.
type ValidateNameFunc func(name string, prefix bool) []string
+// ValidateNameFuncWithErrors validates that the provided name is valid for a
+// given resource type.
+//
+// This is similar to ValidateNameFunc, except that it produces an ErrorList.
+type ValidateNameFuncWithErrors func(fldPath *field.Path, name string) field.ErrorList
+
// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain.
func NameIsDNSSubdomain(name string, prefix bool) []string {
if prefix {
@@ -82,7 +88,7 @@ func maskTrailingDash(name string) string {
func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if value < 0 {
- allErrs = append(allErrs, field.Invalid(fldPath, value, IsNegativeErrorMsg))
+ allErrs = append(allErrs, field.Invalid(fldPath, value, IsNegativeErrorMsg).WithOrigin("minimum"))
}
return allErrs
}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
index 593d7ba8cf..839fcbc2c1 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
@@ -46,11 +46,11 @@ func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) fie
for k := range annotations {
// The rule is QualifiedName except that case doesn't matter, so convert to lowercase before checking.
for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) {
- allErrs = append(allErrs, field.Invalid(fldPath, k, msg))
+ allErrs = append(allErrs, field.Invalid(fldPath, k, msg)).WithOrigin("format=k8s-label-key")
}
}
if err := ValidateAnnotationsSize(annotations); err != nil {
- allErrs = append(allErrs, field.TooLong(fldPath, "", TotalAnnotationSizeLimitB))
+ allErrs = append(allErrs, field.TooLong(fldPath, "" /*unused*/, TotalAnnotationSizeLimitB))
}
return allErrs
}
@@ -74,13 +74,13 @@ func validateOwnerReference(ownerReference metav1.OwnerReference, fldPath *field
allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ownerReference.APIVersion, "version must not be empty"))
}
if len(gvk.Kind) == 0 {
- allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "kind must not be empty"))
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "must not be empty"))
}
if len(ownerReference.Name) == 0 {
- allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "name must not be empty"))
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "must not be empty"))
}
if len(ownerReference.UID) == 0 {
- allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "uid must not be empty"))
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "must not be empty"))
}
if _, ok := BannedOwners[gvk]; ok {
allErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf("%s is disallowed from being an owner", gvk)))
@@ -138,7 +138,6 @@ func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) fie
// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already
// been performed.
-// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.
func ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
metadata, err := meta.Accessor(objMeta)
if err != nil {
@@ -149,9 +148,37 @@ func ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, name
return ValidateObjectMetaAccessor(metadata, requiresNamespace, nameFn, fldPath)
}
+// objectMetaValidationOptions defines behavioral modifications for validating
+// an ObjectMeta.
+type objectMetaValidationOptions struct {
+ /* nothing here yet */
+}
+
+// ObjectMetaValidationOption specifies a behavioral modifier for
+// ValidateObjectMetaWithOpts and ValidateObjectMetaAccessorWithOpts.
+type ObjectMetaValidationOption func(opts *objectMetaValidationOptions)
+
+// ValidateObjectMetaWithOpts validates an object's metadata on creation. It
+// expects that name generation has already been performed, so name validation
+// is always executed.
+//
+// This is similar to ValidateObjectMeta, but uses options to buy future-safety
+// and uses a different signature for the name validation function. It also does
+// not directly validate the generateName field, because name generation
+// should have already been performed and it is the result of that generation
+// that must conform to the nameFn.
+func ValidateObjectMetaWithOpts(objMeta *metav1.ObjectMeta, isNamespaced bool, nameFn ValidateNameFuncWithErrors, fldPath *field.Path, options ...ObjectMetaValidationOption) field.ErrorList {
+ metadata, err := meta.Accessor(objMeta)
+ if err != nil {
+ var allErrs field.ErrorList
+ allErrs = append(allErrs, field.InternalError(fldPath, err))
+ return allErrs
+ }
+ return ValidateObjectMetaAccessorWithOpts(metadata, isNamespaced, nameFn, fldPath, options...)
+}
+
// ValidateObjectMetaAccessor validates an object's metadata on creation. It expects that name generation has already
// been performed.
-// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.
func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
@@ -170,7 +197,57 @@ func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, name
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.GetName(), msg))
}
}
- if requiresNamespace {
+
+ return append(allErrs, validateObjectMetaAccessorWithOptsCommon(meta, requiresNamespace, fldPath, nil)...)
+}
+
+// ValidateObjectMetaAccessorWithOpts validates an object's metadata on
+// creation. It expects that name generation has already been performed, so
+// name validation is always executed.
+//
+// This is similar to ValidateObjectMetaAccessor, but uses options to buy
+// future-safety and uses a different signature for the name validation function.
+// It also does not directly validate the generateName field, because name
+// generation should have already been performed and it is the result of that
+// generation that must conform to the nameFn.
+func ValidateObjectMetaAccessorWithOpts(meta metav1.Object, isNamespaced bool, nameFn ValidateNameFuncWithErrors, fldPath *field.Path, options ...ObjectMetaValidationOption) field.ErrorList {
+ opts := objectMetaValidationOptions{}
+ for _, opt := range options {
+ opt(&opts)
+ }
+
+ var allErrs field.ErrorList
+
+ // generateName is not directly validated here. Types can have
+ // different rules for name generation, and the nameFn is for validating
+ // the post-generation data, not the input. In the past we assumed that
+ // name generation was always "append 5 random characters", but that's not
+ // NECESSARILY true. Also, the nameFn should always be considering the max
+ // length of the name, and it doesn't know enough about the name generation
+ // to do that. Also, given a bad generateName, the user will get errors
+ // for both the generateName and name fields. We will focus validation on
+ // the name field, which should give a better UX overall.
+ // TODO(thockin): should we do a max-length check here? e.g. 1K or 4K?
+
+ if len(meta.GetGenerateName()) != 0 && len(meta.GetName()) == 0 {
+ allErrs = append(allErrs,
+ field.InternalError(fldPath.Child("name"), fmt.Errorf("generateName was specified (%q), but no name was generated", meta.GetGenerateName())))
+ }
+ if len(meta.GetName()) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required"))
+ } else {
+ allErrs = append(allErrs, nameFn(fldPath.Child("name"), meta.GetName())...)
+ }
+
+ return append(allErrs, validateObjectMetaAccessorWithOptsCommon(meta, isNamespaced, fldPath, &opts)...)
+}
+
+// validateObjectMetaAccessorWithOptsCommon is a shared function for validating
+// the parts of an ObjectMeta which are handled the same in both paths.
+func validateObjectMetaAccessorWithOptsCommon(meta metav1.Object, isNamespaced bool, fldPath *field.Path, _ *objectMetaValidationOptions) field.ErrorList {
+ var allErrs field.ErrorList
+
+ if isNamespaced {
if len(meta.GetNamespace()) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), ""))
} else {
@@ -180,6 +257,7 @@ func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, name
}
} else {
if len(meta.GetNamespace()) != 0 {
+ // TODO(thockin): change to "may not be specified on this type" or something
allErrs = append(allErrs, field.Forbidden(fldPath.Child("namespace"), "not allowed on this type"))
}
}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
index e7e5c152d0..ec414a84b9 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
@@ -11,6 +11,7 @@ reviewers:
- luxas
- janetkuo
- justinsb
- - ncdc
- soltysh
- dims
+emeritus_reviewers:
+ - ncdc
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
index 15b45ffa84..5005beb12d 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
@@ -18,6 +18,7 @@ package v1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/utils/ptr"
)
// IsControlledBy checks if the object has a controllerRef set to the given owner
@@ -36,10 +37,14 @@ func GetControllerOf(controllee Object) *OwnerReference {
return nil
}
cp := *ref
+ cp.Controller = ptr.To(*ref.Controller)
+ if ref.BlockOwnerDeletion != nil {
+ cp.BlockOwnerDeletion = ptr.To(*ref.BlockOwnerDeletion)
+ }
return &cp
}
-// GetControllerOf returns a pointer to the controllerRef if controllee has a controller
+// GetControllerOfNoCopy returns a pointer to the controllerRef if controllee has a controller
func GetControllerOfNoCopy(controllee Object) *OwnerReference {
refs := controllee.GetOwnerReferences()
for i := range refs {
@@ -52,14 +57,12 @@ func GetControllerOfNoCopy(controllee Object) *OwnerReference {
// NewControllerRef creates an OwnerReference pointing to the given owner.
func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference {
- blockOwnerDeletion := true
- isController := true
return &OwnerReference{
APIVersion: gvk.GroupVersion().String(),
Kind: gvk.Kind,
Name: owner.GetName(),
UID: owner.GetUID(),
- BlockOwnerDeletion: &blockOwnerDeletion,
- Controller: &isController,
+ BlockOwnerDeletion: ptr.To(true),
+ Controller: ptr.To(true),
}
}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
index 7736753d66..31c87361f6 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
@@ -18,7 +18,8 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.apis.meta.v1
// +groupName=meta.k8s.io
-package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1"
+package v1
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
index 1a641e7c12..f6b1a6a4e0 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
@@ -15,7 +15,7 @@ limitations under the License.
*/
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
+// source: k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
package v1
@@ -23,12 +23,10 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
runtime "k8s.io/apimachinery/pkg/runtime"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -37,1477 +35,95 @@ import (
k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-var _ = time.Kitchen
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *APIGroup) Reset() { *m = APIGroup{} }
-func (*APIGroup) ProtoMessage() {}
-func (*APIGroup) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{0}
-}
-func (m *APIGroup) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIGroup) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIGroup.Merge(m, src)
-}
-func (m *APIGroup) XXX_Size() int {
- return m.Size()
-}
-func (m *APIGroup) XXX_DiscardUnknown() {
- xxx_messageInfo_APIGroup.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APIGroup proto.InternalMessageInfo
-
-func (m *APIGroupList) Reset() { *m = APIGroupList{} }
-func (*APIGroupList) ProtoMessage() {}
-func (*APIGroupList) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{1}
-}
-func (m *APIGroupList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIGroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIGroupList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIGroupList.Merge(m, src)
-}
-func (m *APIGroupList) XXX_Size() int {
- return m.Size()
-}
-func (m *APIGroupList) XXX_DiscardUnknown() {
- xxx_messageInfo_APIGroupList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APIGroupList proto.InternalMessageInfo
-
-func (m *APIResource) Reset() { *m = APIResource{} }
-func (*APIResource) ProtoMessage() {}
-func (*APIResource) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{2}
-}
-func (m *APIResource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIResource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIResource.Merge(m, src)
-}
-func (m *APIResource) XXX_Size() int {
- return m.Size()
-}
-func (m *APIResource) XXX_DiscardUnknown() {
- xxx_messageInfo_APIResource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APIResource proto.InternalMessageInfo
-
-func (m *APIResourceList) Reset() { *m = APIResourceList{} }
-func (*APIResourceList) ProtoMessage() {}
-func (*APIResourceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{3}
-}
-func (m *APIResourceList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIResourceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIResourceList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIResourceList.Merge(m, src)
-}
-func (m *APIResourceList) XXX_Size() int {
- return m.Size()
-}
-func (m *APIResourceList) XXX_DiscardUnknown() {
- xxx_messageInfo_APIResourceList.DiscardUnknown(m)
-}
+func (m *APIGroup) Reset() { *m = APIGroup{} }
-var xxx_messageInfo_APIResourceList proto.InternalMessageInfo
+func (m *APIGroupList) Reset() { *m = APIGroupList{} }
-func (m *APIVersions) Reset() { *m = APIVersions{} }
-func (*APIVersions) ProtoMessage() {}
-func (*APIVersions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{4}
-}
-func (m *APIVersions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIVersions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIVersions.Merge(m, src)
-}
-func (m *APIVersions) XXX_Size() int {
- return m.Size()
-}
-func (m *APIVersions) XXX_DiscardUnknown() {
- xxx_messageInfo_APIVersions.DiscardUnknown(m)
-}
+func (m *APIResource) Reset() { *m = APIResource{} }
-var xxx_messageInfo_APIVersions proto.InternalMessageInfo
-
-func (m *ApplyOptions) Reset() { *m = ApplyOptions{} }
-func (*ApplyOptions) ProtoMessage() {}
-func (*ApplyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{5}
-}
-func (m *ApplyOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ApplyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ApplyOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ApplyOptions.Merge(m, src)
-}
-func (m *ApplyOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *ApplyOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_ApplyOptions.DiscardUnknown(m)
-}
+func (m *APIResourceList) Reset() { *m = APIResourceList{} }
-var xxx_messageInfo_ApplyOptions proto.InternalMessageInfo
+func (m *APIVersions) Reset() { *m = APIVersions{} }
-func (m *Condition) Reset() { *m = Condition{} }
-func (*Condition) ProtoMessage() {}
-func (*Condition) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{6}
-}
-func (m *Condition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Condition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Condition.Merge(m, src)
-}
-func (m *Condition) XXX_Size() int {
- return m.Size()
-}
-func (m *Condition) XXX_DiscardUnknown() {
- xxx_messageInfo_Condition.DiscardUnknown(m)
-}
+func (m *ApplyOptions) Reset() { *m = ApplyOptions{} }
-var xxx_messageInfo_Condition proto.InternalMessageInfo
+func (m *Condition) Reset() { *m = Condition{} }
-func (m *CreateOptions) Reset() { *m = CreateOptions{} }
-func (*CreateOptions) ProtoMessage() {}
-func (*CreateOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{7}
-}
-func (m *CreateOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CreateOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CreateOptions.Merge(m, src)
-}
-func (m *CreateOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *CreateOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_CreateOptions.DiscardUnknown(m)
-}
+func (m *CreateOptions) Reset() { *m = CreateOptions{} }
-var xxx_messageInfo_CreateOptions proto.InternalMessageInfo
+func (m *DeleteOptions) Reset() { *m = DeleteOptions{} }
-func (m *DeleteOptions) Reset() { *m = DeleteOptions{} }
-func (*DeleteOptions) ProtoMessage() {}
-func (*DeleteOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{8}
-}
-func (m *DeleteOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeleteOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeleteOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeleteOptions.Merge(m, src)
-}
-func (m *DeleteOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *DeleteOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_DeleteOptions.DiscardUnknown(m)
-}
+func (m *Duration) Reset() { *m = Duration{} }
-var xxx_messageInfo_DeleteOptions proto.InternalMessageInfo
+func (m *FieldSelectorRequirement) Reset() { *m = FieldSelectorRequirement{} }
-func (m *Duration) Reset() { *m = Duration{} }
-func (*Duration) ProtoMessage() {}
-func (*Duration) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{9}
-}
-func (m *Duration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Duration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Duration.Merge(m, src)
-}
-func (m *Duration) XXX_Size() int {
- return m.Size()
-}
-func (m *Duration) XXX_DiscardUnknown() {
- xxx_messageInfo_Duration.DiscardUnknown(m)
-}
+func (m *FieldsV1) Reset() { *m = FieldsV1{} }
-var xxx_messageInfo_Duration proto.InternalMessageInfo
+func (m *GetOptions) Reset() { *m = GetOptions{} }
-func (m *FieldsV1) Reset() { *m = FieldsV1{} }
-func (*FieldsV1) ProtoMessage() {}
-func (*FieldsV1) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{10}
-}
-func (m *FieldsV1) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FieldsV1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FieldsV1) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FieldsV1.Merge(m, src)
-}
-func (m *FieldsV1) XXX_Size() int {
- return m.Size()
-}
-func (m *FieldsV1) XXX_DiscardUnknown() {
- xxx_messageInfo_FieldsV1.DiscardUnknown(m)
-}
+func (m *GroupKind) Reset() { *m = GroupKind{} }
-var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo
+func (m *GroupResource) Reset() { *m = GroupResource{} }
-func (m *GetOptions) Reset() { *m = GetOptions{} }
-func (*GetOptions) ProtoMessage() {}
-func (*GetOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{11}
-}
-func (m *GetOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GetOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GetOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetOptions.Merge(m, src)
-}
-func (m *GetOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *GetOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_GetOptions.DiscardUnknown(m)
-}
+func (m *GroupVersion) Reset() { *m = GroupVersion{} }
-var xxx_messageInfo_GetOptions proto.InternalMessageInfo
+func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} }
-func (m *GroupKind) Reset() { *m = GroupKind{} }
-func (*GroupKind) ProtoMessage() {}
-func (*GroupKind) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{12}
-}
-func (m *GroupKind) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupKind) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupKind.Merge(m, src)
-}
-func (m *GroupKind) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupKind) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupKind.DiscardUnknown(m)
-}
+func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} }
-var xxx_messageInfo_GroupKind proto.InternalMessageInfo
+func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} }
-func (m *GroupResource) Reset() { *m = GroupResource{} }
-func (*GroupResource) ProtoMessage() {}
-func (*GroupResource) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{13}
-}
-func (m *GroupResource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupResource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupResource.Merge(m, src)
-}
-func (m *GroupResource) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupResource) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupResource.DiscardUnknown(m)
-}
+func (m *LabelSelector) Reset() { *m = LabelSelector{} }
-var xxx_messageInfo_GroupResource proto.InternalMessageInfo
+func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
-func (m *GroupVersion) Reset() { *m = GroupVersion{} }
-func (*GroupVersion) ProtoMessage() {}
-func (*GroupVersion) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{14}
-}
-func (m *GroupVersion) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupVersion) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupVersion.Merge(m, src)
-}
-func (m *GroupVersion) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupVersion) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupVersion.DiscardUnknown(m)
-}
+func (m *List) Reset() { *m = List{} }
-var xxx_messageInfo_GroupVersion proto.InternalMessageInfo
+func (m *ListMeta) Reset() { *m = ListMeta{} }
-func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} }
-func (*GroupVersionForDiscovery) ProtoMessage() {}
-func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{15}
-}
-func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupVersionForDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupVersionForDiscovery) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupVersionForDiscovery.Merge(m, src)
-}
-func (m *GroupVersionForDiscovery) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupVersionForDiscovery) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupVersionForDiscovery.DiscardUnknown(m)
-}
+func (m *ListOptions) Reset() { *m = ListOptions{} }
-var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo
+func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} }
-func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} }
-func (*GroupVersionKind) ProtoMessage() {}
-func (*GroupVersionKind) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{16}
-}
-func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupVersionKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupVersionKind) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupVersionKind.Merge(m, src)
-}
-func (m *GroupVersionKind) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupVersionKind) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupVersionKind.DiscardUnknown(m)
-}
+func (m *MicroTime) Reset() { *m = MicroTime{} }
-var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo
+func (m *ObjectMeta) Reset() { *m = ObjectMeta{} }
-func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} }
-func (*GroupVersionResource) ProtoMessage() {}
-func (*GroupVersionResource) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{17}
-}
-func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupVersionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupVersionResource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupVersionResource.Merge(m, src)
-}
-func (m *GroupVersionResource) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupVersionResource) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupVersionResource.DiscardUnknown(m)
-}
+func (m *OwnerReference) Reset() { *m = OwnerReference{} }
-var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo
+func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} }
-func (m *LabelSelector) Reset() { *m = LabelSelector{} }
-func (*LabelSelector) ProtoMessage() {}
-func (*LabelSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{18}
-}
-func (m *LabelSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LabelSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LabelSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelSelector.Merge(m, src)
-}
-func (m *LabelSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *LabelSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelSelector.DiscardUnknown(m)
-}
+func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
-var xxx_messageInfo_LabelSelector proto.InternalMessageInfo
+func (m *Patch) Reset() { *m = Patch{} }
-func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
-func (*LabelSelectorRequirement) ProtoMessage() {}
-func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{19}
-}
-func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LabelSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LabelSelectorRequirement) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelSelectorRequirement.Merge(m, src)
-}
-func (m *LabelSelectorRequirement) XXX_Size() int {
- return m.Size()
-}
-func (m *LabelSelectorRequirement) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelSelectorRequirement.DiscardUnknown(m)
-}
+func (m *PatchOptions) Reset() { *m = PatchOptions{} }
-var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo
+func (m *Preconditions) Reset() { *m = Preconditions{} }
-func (m *List) Reset() { *m = List{} }
-func (*List) ProtoMessage() {}
-func (*List) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{20}
-}
-func (m *List) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *List) XXX_Merge(src proto.Message) {
- xxx_messageInfo_List.Merge(m, src)
-}
-func (m *List) XXX_Size() int {
- return m.Size()
-}
-func (m *List) XXX_DiscardUnknown() {
- xxx_messageInfo_List.DiscardUnknown(m)
-}
+func (m *RootPaths) Reset() { *m = RootPaths{} }
-var xxx_messageInfo_List proto.InternalMessageInfo
+func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} }
-func (m *ListMeta) Reset() { *m = ListMeta{} }
-func (*ListMeta) ProtoMessage() {}
-func (*ListMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{21}
-}
-func (m *ListMeta) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ListMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ListMeta) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ListMeta.Merge(m, src)
-}
-func (m *ListMeta) XXX_Size() int {
- return m.Size()
-}
-func (m *ListMeta) XXX_DiscardUnknown() {
- xxx_messageInfo_ListMeta.DiscardUnknown(m)
-}
+func (m *Status) Reset() { *m = Status{} }
-var xxx_messageInfo_ListMeta proto.InternalMessageInfo
+func (m *StatusCause) Reset() { *m = StatusCause{} }
-func (m *ListOptions) Reset() { *m = ListOptions{} }
-func (*ListOptions) ProtoMessage() {}
-func (*ListOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{22}
-}
-func (m *ListOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ListOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ListOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ListOptions.Merge(m, src)
-}
-func (m *ListOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *ListOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_ListOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ListOptions proto.InternalMessageInfo
-
-func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} }
-func (*ManagedFieldsEntry) ProtoMessage() {}
-func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{23}
-}
-func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ManagedFieldsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ManagedFieldsEntry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ManagedFieldsEntry.Merge(m, src)
-}
-func (m *ManagedFieldsEntry) XXX_Size() int {
- return m.Size()
-}
-func (m *ManagedFieldsEntry) XXX_DiscardUnknown() {
- xxx_messageInfo_ManagedFieldsEntry.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo
-
-func (m *MicroTime) Reset() { *m = MicroTime{} }
-func (*MicroTime) ProtoMessage() {}
-func (*MicroTime) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{24}
-}
-func (m *MicroTime) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MicroTime.Unmarshal(m, b)
-}
-func (m *MicroTime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MicroTime.Marshal(b, m, deterministic)
-}
-func (m *MicroTime) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MicroTime.Merge(m, src)
-}
-func (m *MicroTime) XXX_Size() int {
- return xxx_messageInfo_MicroTime.Size(m)
-}
-func (m *MicroTime) XXX_DiscardUnknown() {
- xxx_messageInfo_MicroTime.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MicroTime proto.InternalMessageInfo
-
-func (m *ObjectMeta) Reset() { *m = ObjectMeta{} }
-func (*ObjectMeta) ProtoMessage() {}
-func (*ObjectMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{25}
-}
-func (m *ObjectMeta) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ObjectMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ObjectMeta) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ObjectMeta.Merge(m, src)
-}
-func (m *ObjectMeta) XXX_Size() int {
- return m.Size()
-}
-func (m *ObjectMeta) XXX_DiscardUnknown() {
- xxx_messageInfo_ObjectMeta.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo
-
-func (m *OwnerReference) Reset() { *m = OwnerReference{} }
-func (*OwnerReference) ProtoMessage() {}
-func (*OwnerReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{26}
-}
-func (m *OwnerReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *OwnerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *OwnerReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_OwnerReference.Merge(m, src)
-}
-func (m *OwnerReference) XXX_Size() int {
- return m.Size()
-}
-func (m *OwnerReference) XXX_DiscardUnknown() {
- xxx_messageInfo_OwnerReference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_OwnerReference proto.InternalMessageInfo
-
-func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} }
-func (*PartialObjectMetadata) ProtoMessage() {}
-func (*PartialObjectMetadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{27}
-}
-func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PartialObjectMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PartialObjectMetadata) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PartialObjectMetadata.Merge(m, src)
-}
-func (m *PartialObjectMetadata) XXX_Size() int {
- return m.Size()
-}
-func (m *PartialObjectMetadata) XXX_DiscardUnknown() {
- xxx_messageInfo_PartialObjectMetadata.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo
-
-func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
-func (*PartialObjectMetadataList) ProtoMessage() {}
-func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{28}
-}
-func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PartialObjectMetadataList.Merge(m, src)
-}
-func (m *PartialObjectMetadataList) XXX_Size() int {
- return m.Size()
-}
-func (m *PartialObjectMetadataList) XXX_DiscardUnknown() {
- xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
-
-func (m *Patch) Reset() { *m = Patch{} }
-func (*Patch) ProtoMessage() {}
-func (*Patch) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{29}
-}
-func (m *Patch) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Patch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Patch) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Patch.Merge(m, src)
-}
-func (m *Patch) XXX_Size() int {
- return m.Size()
-}
-func (m *Patch) XXX_DiscardUnknown() {
- xxx_messageInfo_Patch.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Patch proto.InternalMessageInfo
-
-func (m *PatchOptions) Reset() { *m = PatchOptions{} }
-func (*PatchOptions) ProtoMessage() {}
-func (*PatchOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{30}
-}
-func (m *PatchOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PatchOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PatchOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PatchOptions.Merge(m, src)
-}
-func (m *PatchOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *PatchOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_PatchOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PatchOptions proto.InternalMessageInfo
-
-func (m *Preconditions) Reset() { *m = Preconditions{} }
-func (*Preconditions) ProtoMessage() {}
-func (*Preconditions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{31}
-}
-func (m *Preconditions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Preconditions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Preconditions.Merge(m, src)
-}
-func (m *Preconditions) XXX_Size() int {
- return m.Size()
-}
-func (m *Preconditions) XXX_DiscardUnknown() {
- xxx_messageInfo_Preconditions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Preconditions proto.InternalMessageInfo
-
-func (m *RootPaths) Reset() { *m = RootPaths{} }
-func (*RootPaths) ProtoMessage() {}
-func (*RootPaths) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{32}
-}
-func (m *RootPaths) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RootPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RootPaths) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RootPaths.Merge(m, src)
-}
-func (m *RootPaths) XXX_Size() int {
- return m.Size()
-}
-func (m *RootPaths) XXX_DiscardUnknown() {
- xxx_messageInfo_RootPaths.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RootPaths proto.InternalMessageInfo
-
-func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} }
-func (*ServerAddressByClientCIDR) ProtoMessage() {}
-func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{33}
-}
-func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServerAddressByClientCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServerAddressByClientCIDR) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServerAddressByClientCIDR.Merge(m, src)
-}
-func (m *ServerAddressByClientCIDR) XXX_Size() int {
- return m.Size()
-}
-func (m *ServerAddressByClientCIDR) XXX_DiscardUnknown() {
- xxx_messageInfo_ServerAddressByClientCIDR.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo
-
-func (m *Status) Reset() { *m = Status{} }
-func (*Status) ProtoMessage() {}
-func (*Status) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{34}
-}
-func (m *Status) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Status) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Status.Merge(m, src)
-}
-func (m *Status) XXX_Size() int {
- return m.Size()
-}
-func (m *Status) XXX_DiscardUnknown() {
- xxx_messageInfo_Status.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Status proto.InternalMessageInfo
-
-func (m *StatusCause) Reset() { *m = StatusCause{} }
-func (*StatusCause) ProtoMessage() {}
-func (*StatusCause) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{35}
-}
-func (m *StatusCause) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatusCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatusCause) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatusCause.Merge(m, src)
-}
-func (m *StatusCause) XXX_Size() int {
- return m.Size()
-}
-func (m *StatusCause) XXX_DiscardUnknown() {
- xxx_messageInfo_StatusCause.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatusCause proto.InternalMessageInfo
-
-func (m *StatusDetails) Reset() { *m = StatusDetails{} }
-func (*StatusDetails) ProtoMessage() {}
-func (*StatusDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{36}
-}
-func (m *StatusDetails) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatusDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatusDetails) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatusDetails.Merge(m, src)
-}
-func (m *StatusDetails) XXX_Size() int {
- return m.Size()
-}
-func (m *StatusDetails) XXX_DiscardUnknown() {
- xxx_messageInfo_StatusDetails.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatusDetails proto.InternalMessageInfo
-
-func (m *TableOptions) Reset() { *m = TableOptions{} }
-func (*TableOptions) ProtoMessage() {}
-func (*TableOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{37}
-}
-func (m *TableOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TableOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TableOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TableOptions.Merge(m, src)
-}
-func (m *TableOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *TableOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_TableOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TableOptions proto.InternalMessageInfo
-
-func (m *Time) Reset() { *m = Time{} }
-func (*Time) ProtoMessage() {}
-func (*Time) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{38}
-}
-func (m *Time) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Time.Unmarshal(m, b)
-}
-func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Time.Marshal(b, m, deterministic)
-}
-func (m *Time) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Time.Merge(m, src)
-}
-func (m *Time) XXX_Size() int {
- return xxx_messageInfo_Time.Size(m)
-}
-func (m *Time) XXX_DiscardUnknown() {
- xxx_messageInfo_Time.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Time proto.InternalMessageInfo
-
-func (m *Timestamp) Reset() { *m = Timestamp{} }
-func (*Timestamp) ProtoMessage() {}
-func (*Timestamp) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{39}
-}
-func (m *Timestamp) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Timestamp) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Timestamp.Merge(m, src)
-}
-func (m *Timestamp) XXX_Size() int {
- return m.Size()
-}
-func (m *Timestamp) XXX_DiscardUnknown() {
- xxx_messageInfo_Timestamp.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Timestamp proto.InternalMessageInfo
-
-func (m *TypeMeta) Reset() { *m = TypeMeta{} }
-func (*TypeMeta) ProtoMessage() {}
-func (*TypeMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{40}
-}
-func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TypeMeta) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TypeMeta.Merge(m, src)
-}
-func (m *TypeMeta) XXX_Size() int {
- return m.Size()
-}
-func (m *TypeMeta) XXX_DiscardUnknown() {
- xxx_messageInfo_TypeMeta.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TypeMeta proto.InternalMessageInfo
-
-func (m *UpdateOptions) Reset() { *m = UpdateOptions{} }
-func (*UpdateOptions) ProtoMessage() {}
-func (*UpdateOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{41}
-}
-func (m *UpdateOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *UpdateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *UpdateOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UpdateOptions.Merge(m, src)
-}
-func (m *UpdateOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *UpdateOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_UpdateOptions.DiscardUnknown(m)
-}
+func (m *StatusDetails) Reset() { *m = StatusDetails{} }
-var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo
+func (m *TableOptions) Reset() { *m = TableOptions{} }
-func (m *Verbs) Reset() { *m = Verbs{} }
-func (*Verbs) ProtoMessage() {}
-func (*Verbs) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{42}
-}
-func (m *Verbs) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Verbs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Verbs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Verbs.Merge(m, src)
-}
-func (m *Verbs) XXX_Size() int {
- return m.Size()
-}
-func (m *Verbs) XXX_DiscardUnknown() {
- xxx_messageInfo_Verbs.DiscardUnknown(m)
-}
+func (m *Time) Reset() { *m = Time{} }
-var xxx_messageInfo_Verbs proto.InternalMessageInfo
+func (m *Timestamp) Reset() { *m = Timestamp{} }
-func (m *WatchEvent) Reset() { *m = WatchEvent{} }
-func (*WatchEvent) ProtoMessage() {}
-func (*WatchEvent) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{43}
-}
-func (m *WatchEvent) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WatchEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *WatchEvent) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WatchEvent.Merge(m, src)
-}
-func (m *WatchEvent) XXX_Size() int {
- return m.Size()
-}
-func (m *WatchEvent) XXX_DiscardUnknown() {
- xxx_messageInfo_WatchEvent.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchEvent proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup")
- proto.RegisterType((*APIGroupList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroupList")
- proto.RegisterType((*APIResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResource")
- proto.RegisterType((*APIResourceList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResourceList")
- proto.RegisterType((*APIVersions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIVersions")
- proto.RegisterType((*ApplyOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ApplyOptions")
- proto.RegisterType((*Condition)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Condition")
- proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions")
- proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions")
- proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration")
- proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1")
- proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions")
- proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind")
- proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource")
- proto.RegisterType((*GroupVersion)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersion")
- proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery")
- proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind")
- proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource")
- proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector.MatchLabelsEntry")
- proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement")
- proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List")
- proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta")
- proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions")
- proto.RegisterType((*ManagedFieldsEntry)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry")
- proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime")
- proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.AnnotationsEntry")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.LabelsEntry")
- proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference")
- proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata")
- proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadataList")
- proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch")
- proto.RegisterType((*PatchOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PatchOptions")
- proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions")
- proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths")
- proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR")
- proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status")
- proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause")
- proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails")
- proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TableOptions")
- proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time")
- proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp")
- proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta")
- proto.RegisterType((*UpdateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.UpdateOptions")
- proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs")
- proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_cf52fa777ced5367)
-}
-
-var fileDescriptor_cf52fa777ced5367 = []byte{
- // 2867 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4b, 0x6f, 0x24, 0x47,
- 0xd9, 0x3d, 0x0f, 0x7b, 0xe6, 0x9b, 0x19, 0x3f, 0x6a, 0xbd, 0x30, 0x6b, 0x84, 0xc7, 0xe9, 0x44,
- 0xd1, 0x06, 0x92, 0x71, 0x76, 0x09, 0xd1, 0x66, 0x43, 0x02, 0x1e, 0xcf, 0x7a, 0xe3, 0x64, 0x1d,
- 0x5b, 0xe5, 0xdd, 0x05, 0x42, 0x84, 0xd2, 0x9e, 0x2e, 0x8f, 0x1b, 0xf7, 0x74, 0x4f, 0xaa, 0x7a,
- 0xbc, 0x19, 0x38, 0x90, 0x03, 0x08, 0x90, 0x50, 0x14, 0x6e, 0x9c, 0x50, 0x22, 0xf8, 0x01, 0x88,
- 0x13, 0x77, 0x90, 0xc8, 0x31, 0x88, 0x4b, 0x24, 0xd0, 0x28, 0x31, 0x07, 0x8e, 0x88, 0xab, 0x85,
- 0x04, 0xaa, 0x47, 0x77, 0x57, 0xcf, 0x63, 0xdd, 0x93, 0x5d, 0x22, 0x6e, 0xd3, 0xdf, 0xbb, 0xaa,
- 0xbe, 0xfa, 0xea, 0x7b, 0x0c, 0xec, 0x1c, 0x5f, 0x63, 0x75, 0xc7, 0x5f, 0x3f, 0xee, 0x1d, 0x10,
- 0xea, 0x91, 0x80, 0xb0, 0xf5, 0x13, 0xe2, 0xd9, 0x3e, 0x5d, 0x57, 0x08, 0xab, 0xeb, 0x74, 0xac,
- 0xd6, 0x91, 0xe3, 0x11, 0xda, 0x5f, 0xef, 0x1e, 0xb7, 0x39, 0x80, 0xad, 0x77, 0x48, 0x60, 0xad,
- 0x9f, 0x5c, 0x59, 0x6f, 0x13, 0x8f, 0x50, 0x2b, 0x20, 0x76, 0xbd, 0x4b, 0xfd, 0xc0, 0x47, 0x8f,
- 0x49, 0xae, 0xba, 0xce, 0x55, 0xef, 0x1e, 0xb7, 0x39, 0x80, 0xd5, 0x39, 0x57, 0xfd, 0xe4, 0xca,
- 0xca, 0x53, 0x6d, 0x27, 0x38, 0xea, 0x1d, 0xd4, 0x5b, 0x7e, 0x67, 0xbd, 0xed, 0xb7, 0xfd, 0x75,
- 0xc1, 0x7c, 0xd0, 0x3b, 0x14, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0x42, 0x57, 0x26, 0x9a, 0x42, 0x7b,
- 0x5e, 0xe0, 0x74, 0xc8, 0xb0, 0x15, 0x2b, 0xcf, 0x9e, 0xc7, 0xc0, 0x5a, 0x47, 0xa4, 0x63, 0x0d,
- 0xf3, 0x99, 0x7f, 0xca, 0x42, 0x61, 0x63, 0x6f, 0xfb, 0x26, 0xf5, 0x7b, 0x5d, 0xb4, 0x06, 0x39,
- 0xcf, 0xea, 0x90, 0xaa, 0xb1, 0x66, 0x5c, 0x2e, 0x36, 0xca, 0x1f, 0x0c, 0x6a, 0x33, 0xa7, 0x83,
- 0x5a, 0xee, 0x55, 0xab, 0x43, 0xb0, 0xc0, 0x20, 0x17, 0x0a, 0x27, 0x84, 0x32, 0xc7, 0xf7, 0x58,
- 0x35, 0xb3, 0x96, 0xbd, 0x5c, 0xba, 0xfa, 0x62, 0x3d, 0xcd, 0xfa, 0xeb, 0x42, 0xc1, 0x5d, 0xc9,
- 0xba, 0xe5, 0xd3, 0xa6, 0xc3, 0x5a, 0xfe, 0x09, 0xa1, 0xfd, 0xc6, 0xa2, 0xd2, 0x52, 0x50, 0x48,
- 0x86, 0x23, 0x0d, 0xe8, 0x47, 0x06, 0x2c, 0x76, 0x29, 0x39, 0x24, 0x94, 0x12, 0x5b, 0xe1, 0xab,
- 0xd9, 0x35, 0xe3, 0x21, 0xa8, 0xad, 0x2a, 0xb5, 0x8b, 0x7b, 0x43, 0xf2, 0xf1, 0x88, 0x46, 0xf4,
- 0x6b, 0x03, 0x56, 0x18, 0xa1, 0x27, 0x84, 0x6e, 0xd8, 0x36, 0x25, 0x8c, 0x35, 0xfa, 0x9b, 0xae,
- 0x43, 0xbc, 0x60, 0x73, 0xbb, 0x89, 0x59, 0x35, 0x27, 0xf6, 0xe1, 0xeb, 0xe9, 0x0c, 0xda, 0x9f,
- 0x24, 0xa7, 0x61, 0x2a, 0x8b, 0x56, 0x26, 0x92, 0x30, 0x7c, 0x1f, 0x33, 0xcc, 0x43, 0x28, 0x87,
- 0x07, 0x79, 0xcb, 0x61, 0x01, 0xba, 0x0b, 0xb3, 0x6d, 0xfe, 0xc1, 0xaa, 0x86, 0x30, 0xb0, 0x9e,
- 0xce, 0xc0, 0x50, 0x46, 0x63, 0x5e, 0xd9, 0x33, 0x2b, 0x3e, 0x19, 0x56, 0xd2, 0xcc, 0x9f, 0xe5,
- 0xa0, 0xb4, 0xb1, 0xb7, 0x8d, 0x09, 0xf3, 0x7b, 0xb4, 0x45, 0x52, 0x38, 0xcd, 0x35, 0x28, 0x33,
- 0xc7, 0x6b, 0xf7, 0x5c, 0x8b, 0x72, 0x68, 0x75, 0x56, 0x50, 0x2e, 0x2b, 0xca, 0xf2, 0xbe, 0x86,
- 0xc3, 0x09, 0x4a, 0x74, 0x15, 0x80, 0x4b, 0x60, 0x5d, 0xab, 0x45, 0xec, 0x6a, 0x66, 0xcd, 0xb8,
- 0x5c, 0x68, 0x20, 0xc5, 0x07, 0xaf, 0x46, 0x18, 0xac, 0x51, 0xa1, 0x47, 0x21, 0x2f, 0x2c, 0xad,
- 0x16, 0x84, 0x9a, 0x8a, 0x22, 0xcf, 0x8b, 0x65, 0x60, 0x89, 0x43, 0x4f, 0xc0, 0x9c, 0xf2, 0xb2,
- 0x6a, 0x51, 0x90, 0x2d, 0x28, 0xb2, 0xb9, 0xd0, 0x0d, 0x42, 0x3c, 0x5f, 0xdf, 0xb1, 0xe3, 0xd9,
- 0xc2, 0xef, 0xb4, 0xf5, 0xbd, 0xe2, 0x78, 0x36, 0x16, 0x18, 0x74, 0x0b, 0xf2, 0x27, 0x84, 0x1e,
- 0x70, 0x4f, 0xe0, 0xae, 0xf9, 0xe5, 0x74, 0x1b, 0x7d, 0x97, 0xb3, 0x34, 0x8a, 0xdc, 0x34, 0xf1,
- 0x13, 0x4b, 0x21, 0xa8, 0x0e, 0xc0, 0x8e, 0x7c, 0x1a, 0x88, 0xe5, 0x55, 0xf3, 0x6b, 0xd9, 0xcb,
- 0xc5, 0xc6, 0x3c, 0x5f, 0xef, 0x7e, 0x04, 0xc5, 0x1a, 0x05, 0xa7, 0x6f, 0x59, 0x01, 0x69, 0xfb,
- 0xd4, 0x21, 0xac, 0x3a, 0x17, 0xd3, 0x6f, 0x46, 0x50, 0xac, 0x51, 0xa0, 0x97, 0x01, 0xb1, 0xc0,
- 0xa7, 0x56, 0x9b, 0xa8, 0xa5, 0xbe, 0x64, 0xb1, 0xa3, 0x2a, 0x88, 0xd5, 0xad, 0xa8, 0xd5, 0xa1,
- 0xfd, 0x11, 0x0a, 0x3c, 0x86, 0xcb, 0xfc, 0x9d, 0x01, 0x0b, 0x9a, 0x2f, 0x08, 0xbf, 0xbb, 0x06,
- 0xe5, 0xb6, 0x76, 0xeb, 0x94, 0x5f, 0x44, 0xa7, 0xad, 0xdf, 0x48, 0x9c, 0xa0, 0x44, 0x04, 0x8a,
- 0x54, 0x49, 0x0a, 0xa3, 0xcb, 0x95, 0xd4, 0x4e, 0x1b, 0xda, 0x10, 0x6b, 0xd2, 0x80, 0x0c, 0xc7,
- 0x92, 0xcd, 0x7f, 0x18, 0xc2, 0x81, 0xc3, 0x78, 0x83, 0x2e, 0x6b, 0x31, 0xcd, 0x10, 0xdb, 0x57,
- 0x9e, 0x10, 0x8f, 0xce, 0x09, 0x04, 0x99, 0xff, 0x8b, 0x40, 0x70, 0xbd, 0xf0, 0xcb, 0xf7, 0x6a,
- 0x33, 0x6f, 0xff, 0x6d, 0x6d, 0xc6, 0xfc, 0x85, 0x01, 0xe5, 0x8d, 0x6e, 0xd7, 0xed, 0xef, 0x76,
- 0x03, 0xb1, 0x00, 0x13, 0x66, 0x6d, 0xda, 0xc7, 0x3d, 0x4f, 0x2d, 0x14, 0xf8, 0xfd, 0x6e, 0x0a,
- 0x08, 0x56, 0x18, 0x7e, 0x7f, 0x0e, 0x7d, 0xda, 0x22, 0xea, 0xba, 0x45, 0xf7, 0x67, 0x8b, 0x03,
- 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x74, 0x88, 0x6b, 0xef, 0x58, 0x9e, 0xd5, 0x26, 0x54, 0x5d, 0x8e,
- 0x68, 0xeb, 0xb7, 0x34, 0x1c, 0x4e, 0x50, 0x9a, 0xff, 0xc9, 0x40, 0x71, 0xd3, 0xf7, 0x6c, 0x27,
- 0x50, 0x97, 0x2b, 0xe8, 0x77, 0x47, 0x82, 0xc7, 0xed, 0x7e, 0x97, 0x60, 0x81, 0x41, 0xcf, 0xc1,
- 0x2c, 0x0b, 0xac, 0xa0, 0xc7, 0x84, 0x3d, 0xc5, 0xc6, 0x23, 0x61, 0x58, 0xda, 0x17, 0xd0, 0xb3,
- 0x41, 0x6d, 0x21, 0x12, 0x27, 0x41, 0x58, 0x31, 0x70, 0x4f, 0xf7, 0x0f, 0xc4, 0x46, 0xd9, 0x37,
- 0xe5, 0xb3, 0x17, 0xbe, 0x1f, 0xd9, 0xd8, 0xd3, 0x77, 0x47, 0x28, 0xf0, 0x18, 0x2e, 0x74, 0x02,
- 0xc8, 0xb5, 0x58, 0x70, 0x9b, 0x5a, 0x1e, 0x13, 0xba, 0x6e, 0x3b, 0x1d, 0xa2, 0x2e, 0xfc, 0x97,
- 0xd2, 0x9d, 0x38, 0xe7, 0x88, 0xf5, 0xde, 0x1a, 0x91, 0x86, 0xc7, 0x68, 0x40, 0x8f, 0xc3, 0x2c,
- 0x25, 0x16, 0xf3, 0xbd, 0x6a, 0x5e, 0x2c, 0x3f, 0x8a, 0xca, 0x58, 0x40, 0xb1, 0xc2, 0xf2, 0x80,
- 0xd6, 0x21, 0x8c, 0x59, 0xed, 0x30, 0xbc, 0x46, 0x01, 0x6d, 0x47, 0x82, 0x71, 0x88, 0x37, 0x7f,
- 0x6b, 0x40, 0x65, 0x93, 0x12, 0x2b, 0x20, 0xd3, 0xb8, 0xc5, 0xa7, 0x3e, 0x71, 0xb4, 0x01, 0x0b,
- 0xe2, 0xfb, 0xae, 0xe5, 0x3a, 0xb6, 0x3c, 0x83, 0x9c, 0x60, 0xfe, 0xbc, 0x62, 0x5e, 0xd8, 0x4a,
- 0xa2, 0xf1, 0x30, 0xbd, 0xf9, 0x93, 0x2c, 0x54, 0x9a, 0xc4, 0x25, 0xb1, 0xc9, 0x5b, 0x80, 0xda,
- 0xd4, 0x6a, 0x91, 0x3d, 0x42, 0x1d, 0xdf, 0xde, 0x27, 0x2d, 0xdf, 0xb3, 0x99, 0x70, 0xa3, 0x6c,
- 0xe3, 0x73, 0x7c, 0x7f, 0x6f, 0x8e, 0x60, 0xf1, 0x18, 0x0e, 0xe4, 0x42, 0xa5, 0x4b, 0xc5, 0x6f,
- 0xb1, 0xe7, 0xd2, 0xcb, 0x4a, 0x57, 0xbf, 0x92, 0xee, 0x48, 0xf7, 0x74, 0xd6, 0xc6, 0xd2, 0xe9,
- 0xa0, 0x56, 0x49, 0x80, 0x70, 0x52, 0x38, 0xfa, 0x06, 0x2c, 0xfa, 0xb4, 0x7b, 0x64, 0x79, 0x4d,
- 0xd2, 0x25, 0x9e, 0x4d, 0xbc, 0x80, 0x89, 0x8d, 0x2c, 0x34, 0x96, 0x79, 0x2e, 0xb2, 0x3b, 0x84,
- 0xc3, 0x23, 0xd4, 0xe8, 0x35, 0x58, 0xea, 0x52, 0xbf, 0x6b, 0xb5, 0xc5, 0xc6, 0xec, 0xf9, 0xae,
- 0xd3, 0xea, 0xab, 0xed, 0x7c, 0xf2, 0x74, 0x50, 0x5b, 0xda, 0x1b, 0x46, 0x9e, 0x0d, 0x6a, 0x17,
- 0xc4, 0xd6, 0x71, 0x48, 0x8c, 0xc4, 0xa3, 0x62, 0x34, 0x37, 0xc8, 0x4f, 0x72, 0x03, 0x73, 0x1b,
- 0x0a, 0xcd, 0x9e, 0xba, 0x13, 0x2f, 0x40, 0xc1, 0x56, 0xbf, 0xd5, 0xce, 0x87, 0x97, 0x33, 0xa2,
- 0x39, 0x1b, 0xd4, 0x2a, 0x3c, 0xfd, 0xac, 0x87, 0x00, 0x1c, 0xb1, 0x98, 0x8f, 0x43, 0x41, 0x1c,
- 0x3c, 0xbb, 0x7b, 0x05, 0x2d, 0x42, 0x16, 0x5b, 0xf7, 0x84, 0x94, 0x32, 0xe6, 0x3f, 0xb5, 0x28,
- 0xb6, 0x0b, 0x70, 0x93, 0x04, 0xe1, 0xc1, 0x6f, 0xc0, 0x42, 0x18, 0xca, 0x93, 0x2f, 0x4c, 0xe4,
- 0x4d, 0x38, 0x89, 0xc6, 0xc3, 0xf4, 0xe6, 0xeb, 0x50, 0x14, 0xaf, 0x10, 0x7f, 0xc2, 0xe3, 0x74,
- 0xc1, 0xb8, 0x4f, 0xba, 0x10, 0xe6, 0x00, 0x99, 0x49, 0x39, 0x80, 0x66, 0xae, 0x0b, 0x15, 0xc9,
- 0x1b, 0x26, 0x48, 0xa9, 0x34, 0x3c, 0x09, 0x85, 0xd0, 0x4c, 0xa5, 0x25, 0x4a, 0x8c, 0x43, 0x41,
- 0x38, 0xa2, 0xd0, 0xb4, 0x1d, 0x41, 0xe2, 0x45, 0x4d, 0xa7, 0x4c, 0xcb, 0x7e, 0x32, 0xf7, 0xcf,
- 0x7e, 0x34, 0x4d, 0x3f, 0x84, 0xea, 0xa4, 0x6c, 0xfa, 0x01, 0xde, 0xfc, 0xf4, 0xa6, 0x98, 0xef,
- 0x18, 0xb0, 0xa8, 0x4b, 0x4a, 0x7f, 0x7c, 0xe9, 0x95, 0x9c, 0x9f, 0xed, 0x69, 0x3b, 0xf2, 0x2b,
- 0x03, 0x96, 0x13, 0x4b, 0x9b, 0xea, 0xc4, 0xa7, 0x30, 0x4a, 0x77, 0x8e, 0xec, 0x14, 0xce, 0xf1,
- 0x97, 0x0c, 0x54, 0x6e, 0x59, 0x07, 0xc4, 0xdd, 0x27, 0x2e, 0x69, 0x05, 0x3e, 0x45, 0x3f, 0x80,
- 0x52, 0xc7, 0x0a, 0x5a, 0x47, 0x02, 0x1a, 0x56, 0x06, 0xcd, 0x74, 0xc1, 0x2e, 0x21, 0xa9, 0xbe,
- 0x13, 0x8b, 0xb9, 0xe1, 0x05, 0xb4, 0xdf, 0xb8, 0xa0, 0x4c, 0x2a, 0x69, 0x18, 0xac, 0x6b, 0x13,
- 0xe5, 0x9c, 0xf8, 0xbe, 0xf1, 0x56, 0x97, 0xa7, 0x2d, 0xd3, 0x57, 0x91, 0x09, 0x13, 0x30, 0x79,
- 0xb3, 0xe7, 0x50, 0xd2, 0x21, 0x5e, 0x10, 0x97, 0x73, 0x3b, 0x43, 0xf2, 0xf1, 0x88, 0xc6, 0x95,
- 0x17, 0x61, 0x71, 0xd8, 0x78, 0x1e, 0x7f, 0x8e, 0x49, 0x5f, 0x9e, 0x17, 0xe6, 0x3f, 0xd1, 0x32,
- 0xe4, 0x4f, 0x2c, 0xb7, 0xa7, 0x6e, 0x23, 0x96, 0x1f, 0xd7, 0x33, 0xd7, 0x0c, 0xf3, 0x37, 0x06,
- 0x54, 0x27, 0x19, 0x82, 0xbe, 0xa8, 0x09, 0x6a, 0x94, 0x94, 0x55, 0xd9, 0x57, 0x48, 0x5f, 0x4a,
- 0xbd, 0x01, 0x05, 0xbf, 0xcb, 0x73, 0x0a, 0x9f, 0xaa, 0x53, 0x7f, 0x22, 0x3c, 0xc9, 0x5d, 0x05,
- 0x3f, 0x1b, 0xd4, 0x2e, 0x26, 0xc4, 0x87, 0x08, 0x1c, 0xb1, 0xf2, 0x48, 0x2d, 0xec, 0xe1, 0xaf,
- 0x47, 0x14, 0xa9, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0xf7, 0x06, 0xe4, 0x44, 0x42, 0xfe, 0x3a,
- 0x14, 0xf8, 0xfe, 0xd9, 0x56, 0x60, 0x09, 0xbb, 0x52, 0x97, 0x82, 0x9c, 0x7b, 0x87, 0x04, 0x56,
- 0xec, 0x6d, 0x21, 0x04, 0x47, 0x12, 0x11, 0x86, 0xbc, 0x13, 0x90, 0x4e, 0x78, 0x90, 0x4f, 0x4d,
- 0x14, 0xad, 0x1a, 0x11, 0x75, 0x6c, 0xdd, 0xbb, 0xf1, 0x56, 0x40, 0x3c, 0x7e, 0x18, 0xf1, 0xd5,
- 0xd8, 0xe6, 0x32, 0xb0, 0x14, 0x65, 0xfe, 0xcb, 0x80, 0x48, 0x15, 0x77, 0x7e, 0x46, 0xdc, 0xc3,
- 0x5b, 0x8e, 0x77, 0xac, 0xb6, 0x35, 0x32, 0x67, 0x5f, 0xc1, 0x71, 0x44, 0x31, 0xee, 0x79, 0xc8,
- 0x4c, 0xf7, 0x3c, 0x70, 0x85, 0x2d, 0xdf, 0x0b, 0x1c, 0xaf, 0x37, 0x72, 0xdb, 0x36, 0x15, 0x1c,
- 0x47, 0x14, 0x3c, 0x11, 0xa1, 0xa4, 0x63, 0x39, 0x9e, 0xe3, 0xb5, 0xf9, 0x22, 0x36, 0xfd, 0x9e,
- 0x17, 0x88, 0x17, 0x59, 0x25, 0x22, 0x78, 0x04, 0x8b, 0xc7, 0x70, 0x98, 0xff, 0xce, 0x41, 0x89,
- 0xaf, 0x39, 0x7c, 0xe7, 0x9e, 0x87, 0x8a, 0xab, 0x7b, 0x81, 0x5a, 0xfb, 0x45, 0x65, 0x4a, 0xf2,
- 0x5e, 0xe3, 0x24, 0x2d, 0x67, 0x16, 0x29, 0x54, 0xc4, 0x9c, 0x49, 0x32, 0x6f, 0xe9, 0x48, 0x9c,
- 0xa4, 0xe5, 0xd1, 0xeb, 0x1e, 0xbf, 0x1f, 0x2a, 0x33, 0x89, 0x8e, 0xe8, 0x9b, 0x1c, 0x88, 0x25,
- 0x0e, 0xed, 0xc0, 0x05, 0xcb, 0x75, 0xfd, 0x7b, 0x02, 0xd8, 0xf0, 0xfd, 0xe3, 0x8e, 0x45, 0x8f,
- 0x99, 0x28, 0xa6, 0x0b, 0x8d, 0x2f, 0x28, 0x96, 0x0b, 0x1b, 0xa3, 0x24, 0x78, 0x1c, 0xdf, 0xb8,
- 0x63, 0xcb, 0x4d, 0x79, 0x6c, 0x47, 0xb0, 0x3c, 0x04, 0x12, 0xb7, 0x5c, 0x55, 0xb6, 0xcf, 0x28,
- 0x39, 0xcb, 0x78, 0x0c, 0xcd, 0xd9, 0x04, 0x38, 0x1e, 0x2b, 0x11, 0x5d, 0x87, 0x79, 0xee, 0xc9,
- 0x7e, 0x2f, 0x08, 0xf3, 0xce, 0xbc, 0x38, 0x6e, 0x74, 0x3a, 0xa8, 0xcd, 0xdf, 0x4e, 0x60, 0xf0,
- 0x10, 0x25, 0xdf, 0x5c, 0xd7, 0xe9, 0x38, 0x41, 0x75, 0x4e, 0xb0, 0x44, 0x9b, 0x7b, 0x8b, 0x03,
- 0xb1, 0xc4, 0x25, 0x3c, 0xb0, 0x70, 0xae, 0x07, 0x6e, 0xc2, 0x12, 0x23, 0x9e, 0xbd, 0xed, 0x39,
- 0x81, 0x63, 0xb9, 0x37, 0x4e, 0x44, 0x56, 0x59, 0x12, 0x07, 0x71, 0x91, 0xa7, 0x84, 0xfb, 0xc3,
- 0x48, 0x3c, 0x4a, 0x6f, 0xfe, 0x39, 0x0b, 0x48, 0x26, 0xec, 0xb6, 0x4c, 0xca, 0x64, 0x5c, 0xe4,
- 0x65, 0x85, 0x4a, 0xf8, 0x8d, 0xa1, 0xb2, 0x42, 0xe5, 0xfa, 0x21, 0x1e, 0xed, 0x40, 0x51, 0xc6,
- 0xa7, 0xf8, 0xce, 0xad, 0x2b, 0xe2, 0xe2, 0x6e, 0x88, 0x38, 0x1b, 0xd4, 0x56, 0x12, 0x6a, 0x22,
- 0x8c, 0x28, 0xf9, 0x62, 0x09, 0xe8, 0x2a, 0x80, 0xd5, 0x75, 0xf4, 0xa6, 0x5f, 0x31, 0x6e, 0xfd,
- 0xc4, 0xe5, 0x3b, 0xd6, 0xa8, 0xd0, 0x4b, 0x90, 0x0b, 0x3e, 0x5d, 0x59, 0x56, 0x10, 0x55, 0x27,
- 0x2f, 0xc2, 0x84, 0x04, 0xae, 0x5d, 0x5c, 0x0a, 0xc6, 0xcd, 0x52, 0x15, 0x55, 0xa4, 0x7d, 0x2b,
- 0xc2, 0x60, 0x8d, 0x0a, 0x7d, 0x0b, 0x0a, 0x87, 0x2a, 0x9f, 0x15, 0xa7, 0x9b, 0x3a, 0xce, 0x86,
- 0x59, 0xb0, 0xec, 0x3b, 0x84, 0x5f, 0x38, 0x92, 0x86, 0xbe, 0x0a, 0x25, 0xd6, 0x3b, 0x88, 0x52,
- 0x00, 0xe9, 0x12, 0xd1, 0x7b, 0xbb, 0x1f, 0xa3, 0xb0, 0x4e, 0x67, 0xbe, 0x09, 0xc5, 0x1d, 0xa7,
- 0x45, 0x7d, 0x51, 0x48, 0x3e, 0x01, 0x73, 0x2c, 0x51, 0x25, 0x45, 0x27, 0x19, 0xba, 0x6a, 0x88,
- 0xe7, 0x3e, 0xea, 0x59, 0x9e, 0x2f, 0x6b, 0xa1, 0x7c, 0xec, 0xa3, 0xaf, 0x72, 0x20, 0x96, 0xb8,
- 0xeb, 0xcb, 0x3c, 0xcb, 0xf8, 0xe9, 0xfb, 0xb5, 0x99, 0x77, 0xdf, 0xaf, 0xcd, 0xbc, 0xf7, 0xbe,
- 0xca, 0x38, 0xfe, 0x00, 0x00, 0xbb, 0x07, 0xdf, 0x23, 0x2d, 0x19, 0xbb, 0x53, 0xf5, 0x06, 0xc3,
- 0x96, 0xb4, 0xe8, 0x0d, 0x66, 0x86, 0x32, 0x47, 0x0d, 0x87, 0x13, 0x94, 0x68, 0x1d, 0x8a, 0x51,
- 0xd7, 0x4f, 0xf9, 0xc7, 0x52, 0xe8, 0x6f, 0x51, 0x6b, 0x10, 0xc7, 0x34, 0x89, 0x87, 0x24, 0x77,
- 0xee, 0x43, 0xd2, 0x80, 0x6c, 0xcf, 0xb1, 0x55, 0xd5, 0xfd, 0x74, 0xf8, 0x90, 0xdf, 0xd9, 0x6e,
- 0x9e, 0x0d, 0x6a, 0x8f, 0x4c, 0x6a, 0xb6, 0x07, 0xfd, 0x2e, 0x61, 0xf5, 0x3b, 0xdb, 0x4d, 0xcc,
- 0x99, 0xc7, 0x45, 0xb5, 0xd9, 0x29, 0xa3, 0xda, 0x55, 0x80, 0x76, 0xdc, 0xbb, 0x90, 0x41, 0x23,
- 0x72, 0x44, 0xad, 0x67, 0xa1, 0x51, 0x21, 0x06, 0x4b, 0x2d, 0x5e, 0xdf, 0xab, 0x1e, 0x02, 0x0b,
- 0xac, 0x8e, 0xec, 0x86, 0x4e, 0x77, 0x27, 0x2e, 0x29, 0x35, 0x4b, 0x9b, 0xc3, 0xc2, 0xf0, 0xa8,
- 0x7c, 0xe4, 0xc3, 0x92, 0xad, 0xca, 0xcc, 0x58, 0x69, 0x71, 0x6a, 0xa5, 0x22, 0x62, 0x35, 0x87,
- 0x05, 0xe1, 0x51, 0xd9, 0xe8, 0xbb, 0xb0, 0x12, 0x02, 0x47, 0x6b, 0x7d, 0x11, 0xf5, 0xb3, 0x8d,
- 0xd5, 0xd3, 0x41, 0x6d, 0xa5, 0x39, 0x91, 0x0a, 0xdf, 0x47, 0x02, 0xb2, 0x61, 0xd6, 0x95, 0x59,
- 0x72, 0x49, 0x64, 0x36, 0x5f, 0x4b, 0xb7, 0x8a, 0xd8, 0xfb, 0xeb, 0x7a, 0x76, 0x1c, 0xf5, 0x6d,
- 0x54, 0x62, 0xac, 0x64, 0xa3, 0xb7, 0xa0, 0x64, 0x79, 0x9e, 0x1f, 0x58, 0xb2, 0xfb, 0x50, 0x16,
- 0xaa, 0x36, 0xa6, 0x56, 0xb5, 0x11, 0xcb, 0x18, 0xca, 0xc6, 0x35, 0x0c, 0xd6, 0x55, 0xa1, 0x7b,
- 0xb0, 0xe0, 0xdf, 0xf3, 0x08, 0xc5, 0xe4, 0x90, 0x50, 0xe2, 0xb5, 0x08, 0xab, 0x56, 0x84, 0xf6,
- 0x67, 0x52, 0x6a, 0x4f, 0x30, 0xc7, 0x2e, 0x9d, 0x84, 0x33, 0x3c, 0xac, 0x05, 0xd5, 0x79, 0x6c,
- 0xf5, 0x2c, 0xd7, 0xf9, 0x3e, 0xa1, 0xac, 0x3a, 0x1f, 0x37, 0xac, 0xb7, 0x22, 0x28, 0xd6, 0x28,
- 0x50, 0x0f, 0x2a, 0x1d, 0xfd, 0xc9, 0xa8, 0x2e, 0x09, 0x33, 0xaf, 0xa5, 0x33, 0x73, 0xf4, 0x51,
- 0x8b, 0xd3, 0xa0, 0x04, 0x0e, 0x27, 0xb5, 0xac, 0x3c, 0x07, 0xa5, 0x4f, 0x59, 0x21, 0xf0, 0x0a,
- 0x63, 0xf8, 0x40, 0xa6, 0xaa, 0x30, 0xfe, 0x98, 0x81, 0xf9, 0xe4, 0x36, 0x0e, 0x3d, 0x87, 0xf9,
- 0x54, 0xcf, 0x61, 0x58, 0xcb, 0x1a, 0x13, 0x27, 0x17, 0x61, 0x7c, 0xce, 0x4e, 0x8c, 0xcf, 0x2a,
- 0x0c, 0xe6, 0x1e, 0x24, 0x0c, 0xd6, 0x01, 0x78, 0xb2, 0x42, 0x7d, 0xd7, 0x25, 0x54, 0x44, 0xc0,
- 0x82, 0x9a, 0x50, 0x44, 0x50, 0xac, 0x51, 0xf0, 0x94, 0xfa, 0xc0, 0xf5, 0x5b, 0xc7, 0x62, 0x0b,
- 0xc2, 0xdb, 0x2b, 0x62, 0x5f, 0x41, 0xa6, 0xd4, 0x8d, 0x11, 0x2c, 0x1e, 0xc3, 0x61, 0xf6, 0xe1,
- 0xe2, 0x9e, 0x45, 0x79, 0x92, 0x13, 0xdf, 0x14, 0x51, 0xb3, 0xbc, 0x31, 0x52, 0x11, 0x3d, 0x3d,
- 0xed, 0x8d, 0x8b, 0x37, 0x3f, 0x86, 0xc5, 0x55, 0x91, 0xf9, 0x57, 0x03, 0x2e, 0x8d, 0xd5, 0xfd,
- 0x19, 0x54, 0x64, 0x6f, 0x24, 0x2b, 0xb2, 0xe7, 0x53, 0xb6, 0x32, 0xc7, 0x59, 0x3b, 0xa1, 0x3e,
- 0x9b, 0x83, 0xfc, 0x1e, 0xcf, 0x84, 0xcd, 0x0f, 0x0d, 0x28, 0x8b, 0x5f, 0xd3, 0x74, 0x92, 0x6b,
- 0xc9, 0x01, 0x43, 0xf1, 0xe1, 0x0d, 0x17, 0x1e, 0x46, 0xab, 0xf9, 0x1d, 0x03, 0x92, 0x3d, 0x5c,
- 0xf4, 0xa2, 0xbc, 0x02, 0x46, 0xd4, 0x64, 0x9d, 0xd2, 0xfd, 0x5f, 0x98, 0x54, 0x92, 0x5e, 0x48,
- 0xd5, 0xad, 0x7c, 0x12, 0x8a, 0xd8, 0xf7, 0x83, 0x3d, 0x2b, 0x38, 0x62, 0x7c, 0xef, 0xba, 0xfc,
- 0x87, 0xda, 0x5e, 0xb1, 0x77, 0x02, 0x83, 0x25, 0xdc, 0xfc, 0xb9, 0x01, 0x97, 0x26, 0xce, 0x8d,
- 0x78, 0x14, 0x69, 0x45, 0x5f, 0x6a, 0x45, 0x91, 0x23, 0xc7, 0x74, 0x58, 0xa3, 0xe2, 0xb5, 0x64,
- 0x62, 0xd8, 0x34, 0x5c, 0x4b, 0x26, 0xb4, 0xe1, 0x24, 0xad, 0xf9, 0xcf, 0x0c, 0xa8, 0x41, 0xcd,
- 0xff, 0xd8, 0xe9, 0x1f, 0x1f, 0x1a, 0x13, 0xcd, 0x27, 0xc7, 0x44, 0xd1, 0x4c, 0x48, 0x9b, 0x93,
- 0x64, 0xef, 0x3f, 0x27, 0x41, 0xcf, 0x46, 0xa3, 0x17, 0xe9, 0x43, 0xab, 0xc9, 0xd1, 0xcb, 0xd9,
- 0xa0, 0x56, 0x56, 0xc2, 0x93, 0xa3, 0x98, 0xd7, 0x60, 0xce, 0x26, 0x81, 0xe5, 0xb8, 0xb2, 0x2e,
- 0x4c, 0x3d, 0x4c, 0x90, 0xc2, 0x9a, 0x92, 0xb5, 0x51, 0xe2, 0x36, 0xa9, 0x0f, 0x1c, 0x0a, 0xe4,
- 0x01, 0xbb, 0xe5, 0xdb, 0xb2, 0x22, 0xc9, 0xc7, 0x01, 0x7b, 0xd3, 0xb7, 0x09, 0x16, 0x18, 0xf3,
- 0x5d, 0x03, 0x4a, 0x52, 0xd2, 0xa6, 0xd5, 0x63, 0x04, 0x5d, 0x89, 0x56, 0x21, 0x8f, 0xfb, 0x92,
- 0x3e, 0x63, 0x3b, 0x1b, 0xd4, 0x8a, 0x82, 0x4c, 0x14, 0x33, 0x63, 0x66, 0x49, 0x99, 0x73, 0xf6,
- 0xe8, 0x51, 0xc8, 0x8b, 0x0b, 0xa4, 0x36, 0x33, 0x1e, 0x16, 0x72, 0x20, 0x96, 0x38, 0xf3, 0xe3,
- 0x0c, 0x54, 0x12, 0x8b, 0x4b, 0x51, 0x17, 0x44, 0x2d, 0xd4, 0x4c, 0x8a, 0xb6, 0xfc, 0xe4, 0xd1,
- 0xbc, 0x7a, 0xbe, 0x66, 0x1f, 0xe4, 0xf9, 0xfa, 0x36, 0xcc, 0xb6, 0xf8, 0x1e, 0x85, 0xff, 0xf4,
- 0xb8, 0x32, 0xcd, 0x71, 0x8a, 0xdd, 0x8d, 0xbd, 0x51, 0x7c, 0x32, 0xac, 0x04, 0xa2, 0x9b, 0xb0,
- 0x44, 0x49, 0x40, 0xfb, 0x1b, 0x87, 0x01, 0xa1, 0x7a, 0x33, 0x21, 0x1f, 0x67, 0xdf, 0x78, 0x98,
- 0x00, 0x8f, 0xf2, 0x98, 0x07, 0x50, 0xbe, 0x6d, 0x1d, 0xb8, 0xd1, 0x78, 0x0c, 0x43, 0xc5, 0xf1,
- 0x5a, 0x6e, 0xcf, 0x26, 0x32, 0xa0, 0x87, 0xd1, 0x2b, 0xbc, 0xb4, 0xdb, 0x3a, 0xf2, 0x6c, 0x50,
- 0xbb, 0x90, 0x00, 0xc8, 0x79, 0x10, 0x4e, 0x8a, 0x30, 0x5d, 0xc8, 0x7d, 0x86, 0x95, 0xe4, 0x77,
- 0xa0, 0x18, 0xe7, 0xfa, 0x0f, 0x59, 0xa5, 0xf9, 0x06, 0x14, 0xb8, 0xc7, 0x87, 0x35, 0xea, 0x39,
- 0x59, 0x52, 0x32, 0xf7, 0xca, 0xa4, 0xc9, 0xbd, 0xc4, 0x90, 0xf5, 0x4e, 0xd7, 0x7e, 0xc0, 0x21,
- 0x6b, 0xe6, 0x41, 0x5e, 0xbe, 0xec, 0x94, 0x2f, 0xdf, 0x55, 0x90, 0x7f, 0x44, 0xe1, 0x8f, 0x8c,
- 0x4c, 0x20, 0xb4, 0x47, 0x46, 0x7f, 0xff, 0xb5, 0x09, 0xc3, 0x8f, 0x0d, 0x00, 0xd1, 0xca, 0x13,
- 0x6d, 0xa4, 0x14, 0xe3, 0xfc, 0x3b, 0x30, 0xeb, 0x4b, 0x8f, 0x94, 0x83, 0xd6, 0x29, 0xfb, 0xc5,
- 0xd1, 0x45, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xc6, 0xcb, 0x1f, 0x7c, 0xb2, 0x3a, 0xf3, 0xe1, 0x27,
- 0xab, 0x33, 0x1f, 0x7d, 0xb2, 0x3a, 0xf3, 0xf6, 0xe9, 0xaa, 0xf1, 0xc1, 0xe9, 0xaa, 0xf1, 0xe1,
- 0xe9, 0xaa, 0xf1, 0xd1, 0xe9, 0xaa, 0xf1, 0xf1, 0xe9, 0xaa, 0xf1, 0xee, 0xdf, 0x57, 0x67, 0x5e,
- 0x7b, 0x2c, 0xcd, 0x1f, 0xfc, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x28, 0x27, 0x65, 0xab, 0x20,
- 0x28, 0x00, 0x00,
-}
+func (m *TypeMeta) Reset() { *m = TypeMeta{} }
+
+func (m *UpdateOptions) Reset() { *m = UpdateOptions{} }
+
+func (m *Verbs) Reset() { *m = Verbs{} }
+
+func (m *WatchEvent) Reset() { *m = WatchEvent{} }
func (m *APIGroup) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -1954,6 +570,16 @@ func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
+ i--
+ if *m.IgnoreStoreReadErrorWithClusterBreakingPotential {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
if len(m.DryRun) > 0 {
for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.DryRun[iNdEx])
@@ -2026,6 +652,48 @@ func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *FieldSelectorRequirement) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *FieldSelectorRequirement) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *FieldSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Values) > 0 {
+ for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Values[iNdEx])
+ copy(dAtA[i:], m.Values[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ i -= len(m.Operator)
+ copy(dAtA[i:], m.Operator)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *FieldsV1) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -2331,7 +999,7 @@ func (m *LabelSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.MatchLabels {
keysForMatchLabels = append(keysForMatchLabels, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels)
+ sort.Strings(keysForMatchLabels)
for iNdEx := len(keysForMatchLabels) - 1; iNdEx >= 0; iNdEx-- {
v := m.MatchLabels[string(keysForMatchLabels[iNdEx])]
baseI := i
@@ -2703,7 +1371,7 @@ func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Annotations {
keysForAnnotations = append(keysForAnnotations, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ sort.Strings(keysForAnnotations)
for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
v := m.Annotations[string(keysForAnnotations[iNdEx])]
baseI := i
@@ -2727,7 +1395,7 @@ func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Labels {
keysForLabels = append(keysForLabels, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ sort.Strings(keysForLabels)
for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- {
v := m.Labels[string(keysForLabels[iNdEx])]
baseI := i
@@ -3702,6 +2370,9 @@ func (m *DeleteOptions) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if m.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
+ n += 2
+ }
return n
}
@@ -3715,6 +2386,25 @@ func (m *Duration) Size() (n int) {
return n
}
+func (m *FieldSelectorRequirement) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Operator)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
func (m *FieldsV1) Size() (n int) {
if m == nil {
return 0
@@ -4416,6 +3106,7 @@ func (this *DeleteOptions) String() string {
`OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`,
`PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`,
`DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`,
+ `IgnoreStoreReadErrorWithClusterBreakingPotential:` + valueToStringGenerated(this.IgnoreStoreReadErrorWithClusterBreakingPotential) + `,`,
`}`,
}, "")
return s
@@ -4430,6 +3121,18 @@ func (this *Duration) String() string {
}, "")
return s
}
+func (this *FieldSelectorRequirement) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&FieldSelectorRequirement{`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
+ `Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *GetOptions) String() string {
if this == nil {
return "nil"
@@ -4464,7 +3167,7 @@ func (this *LabelSelector) String() string {
for k := range this.MatchLabels {
keysForMatchLabels = append(keysForMatchLabels, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels)
+ sort.Strings(keysForMatchLabels)
mapStringForMatchLabels := "map[string]string{"
for _, k := range keysForMatchLabels {
mapStringForMatchLabels += fmt.Sprintf("%v: %v,", k, this.MatchLabels[k])
@@ -4571,7 +3274,7 @@ func (this *ObjectMeta) String() string {
for k := range this.Labels {
keysForLabels = append(keysForLabels, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ sort.Strings(keysForLabels)
mapStringForLabels := "map[string]string{"
for _, k := range keysForLabels {
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -4581,7 +3284,7 @@ func (this *ObjectMeta) String() string {
for k := range this.Annotations {
keysForAnnotations = append(keysForAnnotations, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ sort.Strings(keysForAnnotations)
mapStringForAnnotations := "map[string]string{"
for _, k := range keysForAnnotations {
mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
@@ -6354,6 +5057,27 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error {
}
m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IgnoreStoreReadErrorWithClusterBreakingPotential", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.IgnoreStoreReadErrorWithClusterBreakingPotential = &b
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -6444,6 +5168,152 @@ func (m *Duration) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *FieldSelectorRequirement) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: FieldSelectorRequirement: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: FieldSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Operator = FieldSelectorOperator(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Values = append(m.Values, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *FieldsV1) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
index a2cd8015fb..fb21b72368 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
@@ -34,6 +34,7 @@ message APIGroup {
optional string name = 1;
// versions are the versions supported in this group.
+ // +listType=atomic
repeated GroupVersionForDiscovery versions = 2;
// preferredVersion is the version preferred by the API server, which
@@ -49,6 +50,7 @@ message APIGroup {
// For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
// +optional
+ // +listType=atomic
repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4;
}
@@ -56,6 +58,7 @@ message APIGroup {
// /apis.
message APIGroupList {
// groups is a list of APIGroup.
+ // +listType=atomic
repeated APIGroup groups = 1;
}
@@ -88,9 +91,11 @@ message APIResource {
optional Verbs verbs = 4;
// shortNames is a list of suggested short names of the resource.
+ // +listType=atomic
repeated string shortNames = 5;
// categories is a list of the grouped resources this resource belongs to (e.g. 'all')
+ // +listType=atomic
repeated string categories = 7;
// The hash value of the storage version, the version this resource is
@@ -112,6 +117,7 @@ message APIResourceList {
optional string groupVersion = 1;
// resources contains the name of the resources and if they are namespaced.
+ // +listType=atomic
repeated APIResource resources = 2;
}
@@ -122,6 +128,7 @@ message APIResourceList {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message APIVersions {
// versions are the api versions that are available.
+ // +listType=atomic
repeated string versions = 1;
// a map of client CIDR to server address that is serving this group.
@@ -131,6 +138,7 @@ message APIVersions {
// The server returns only those CIDRs that it thinks that the client can match.
// For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+ // +listType=atomic
repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2;
}
@@ -145,6 +153,7 @@ message ApplyOptions {
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
+ // +listType=atomic
repeated string dryRun = 1;
// Force is going to "force" Apply requests. It means user will
@@ -235,6 +244,7 @@ message CreateOptions {
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
+ // +listType=atomic
repeated string dryRun = 1;
// fieldManager is a name associated with the actor or entity
@@ -303,7 +313,23 @@ message DeleteOptions {
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
+ // +listType=atomic
repeated string dryRun = 5;
+
+ // if set to true, it will trigger an unsafe deletion of the resource in
+ // case the normal deletion flow fails with a corrupt object error.
+ // A resource is considered corrupt if it can not be retrieved from
+ // the underlying storage successfully because of a) its data can
+ // not be transformed e.g. decryption failure, or b) it fails
+ // to decode into an object.
+ // NOTE: unsafe deletion ignores finalizer constraints, skips
+ // precondition checks, and removes the object from the storage.
+ // WARNING: This may potentially break the cluster if the workload
+ // associated with the resource being unsafe-deleted relies on normal
+ // deletion flow. Use only if you REALLY know what you are doing.
+ // The default value is false, and the user must opt in to enable it
+ // +optional
+ optional bool ignoreStoreReadErrorWithClusterBreakingPotential = 6;
}
// Duration is a wrapper around time.Duration which supports correct
@@ -313,6 +339,25 @@ message Duration {
optional int64 duration = 1;
}
+// FieldSelectorRequirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+message FieldSelectorRequirement {
+ // key is the field selector key that the requirement applies to.
+ optional string key = 1;
+
+ // operator represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist.
+ // The list of operators may grow in the future.
+ optional string operator = 2;
+
+ // values is an array of string values.
+ // If the operator is In or NotIn, the values array must be non-empty.
+ // If the operator is Exists or DoesNotExist, the values array must be empty.
+ // +optional
+ // +listType=atomic
+ repeated string values = 3;
+}
+
// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.
//
// Each key is either a '.' representing the field itself, and will always map to an empty set,
@@ -418,6 +463,7 @@ message LabelSelector {
// matchExpressions is a list of label selector requirements. The requirements are ANDed.
// +optional
+ // +listType=atomic
repeated LabelSelectorRequirement matchExpressions = 2;
}
@@ -436,6 +482,7 @@ message LabelSelectorRequirement {
// the values array must be empty. This array is replaced during a strategic
// merge patch.
// +optional
+ // +listType=atomic
repeated string values = 3;
}
@@ -447,7 +494,7 @@ message List {
optional ListMeta metadata = 1;
// List of objects
- repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
+ repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
}
// ListMeta describes metadata that synthetic resources must have, including lists and
@@ -788,6 +835,8 @@ message ObjectMeta {
// +optional
// +patchMergeKey=uid
// +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=uid
repeated OwnerReference ownerReferences = 13;
// Must be empty before the object is deleted from the registry. Each entry
@@ -805,6 +854,7 @@ message ObjectMeta {
// are not vulnerable to ordering changes in the list.
// +optional
// +patchStrategy=merge
+ // +listType=set
repeated string finalizers = 14;
// ManagedFields maps workflow-id and version to the set of fields
@@ -816,6 +866,7 @@ message ObjectMeta {
// workflow used when modifying the object.
//
// +optional
+ // +listType=atomic
repeated ManagedFieldsEntry managedFields = 17;
}
@@ -890,6 +941,7 @@ message PatchOptions {
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
+ // +listType=atomic
repeated string dryRun = 1;
// Force is going to "force" Apply requests. It means user will
@@ -943,6 +995,7 @@ message Preconditions {
// For example: "/healthz", "/apis".
message RootPaths {
// paths are the paths available at root.
+ // +listType=atomic
repeated string paths = 1;
}
@@ -1049,6 +1102,7 @@ message StatusDetails {
// The Causes array includes more details associated with the StatusReason
// failure. Not all StatusReasons may provide detailed causes.
// +optional
+ // +listType=atomic
repeated StatusCause causes = 4;
// If specified, the time in seconds before the operation should be retried. Some errors may indicate
@@ -1059,6 +1113,7 @@ message StatusDetails {
}
// TableOptions are used when a Table is requested by the caller.
+// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message TableOptions {
// includeObject decides whether to include each object along with its columnar information.
@@ -1135,6 +1190,7 @@ message UpdateOptions {
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
+ // +listType=atomic
repeated string dryRun = 1;
// fieldManager is a name associated with the actor or entity
@@ -1187,6 +1243,6 @@ message WatchEvent {
// * If Type is Deleted: the state of the object immediately before deletion.
// * If Type is Error: *Status is recommended; other types may make sense
// depending on context.
- optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
+ optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.protomessage.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.protomessage.pb.go
new file mode 100644
index 0000000000..459ae1ad87
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.protomessage.pb.go
@@ -0,0 +1,112 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*APIGroup) ProtoMessage() {}
+
+func (*APIGroupList) ProtoMessage() {}
+
+func (*APIResource) ProtoMessage() {}
+
+func (*APIResourceList) ProtoMessage() {}
+
+func (*APIVersions) ProtoMessage() {}
+
+func (*ApplyOptions) ProtoMessage() {}
+
+func (*Condition) ProtoMessage() {}
+
+func (*CreateOptions) ProtoMessage() {}
+
+func (*DeleteOptions) ProtoMessage() {}
+
+func (*Duration) ProtoMessage() {}
+
+func (*FieldSelectorRequirement) ProtoMessage() {}
+
+func (*FieldsV1) ProtoMessage() {}
+
+func (*GetOptions) ProtoMessage() {}
+
+func (*GroupKind) ProtoMessage() {}
+
+func (*GroupResource) ProtoMessage() {}
+
+func (*GroupVersion) ProtoMessage() {}
+
+func (*GroupVersionForDiscovery) ProtoMessage() {}
+
+func (*GroupVersionKind) ProtoMessage() {}
+
+func (*GroupVersionResource) ProtoMessage() {}
+
+func (*LabelSelector) ProtoMessage() {}
+
+func (*LabelSelectorRequirement) ProtoMessage() {}
+
+func (*List) ProtoMessage() {}
+
+func (*ListMeta) ProtoMessage() {}
+
+func (*ListOptions) ProtoMessage() {}
+
+func (*ManagedFieldsEntry) ProtoMessage() {}
+
+func (*MicroTime) ProtoMessage() {}
+
+func (*ObjectMeta) ProtoMessage() {}
+
+func (*OwnerReference) ProtoMessage() {}
+
+func (*PartialObjectMetadata) ProtoMessage() {}
+
+func (*PartialObjectMetadataList) ProtoMessage() {}
+
+func (*Patch) ProtoMessage() {}
+
+func (*PatchOptions) ProtoMessage() {}
+
+func (*Preconditions) ProtoMessage() {}
+
+func (*RootPaths) ProtoMessage() {}
+
+func (*ServerAddressByClientCIDR) ProtoMessage() {}
+
+func (*Status) ProtoMessage() {}
+
+func (*StatusCause) ProtoMessage() {}
+
+func (*StatusDetails) ProtoMessage() {}
+
+func (*TableOptions) ProtoMessage() {}
+
+func (*Time) ProtoMessage() {}
+
+func (*Timestamp) ProtoMessage() {}
+
+func (*TypeMeta) ProtoMessage() {}
+
+func (*UpdateOptions) ProtoMessage() {}
+
+func (*Verbs) ProtoMessage() {}
+
+func (*WatchEvent) ProtoMessage() {}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
index 592dcb8a74..c748071ed7 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
@@ -24,8 +24,10 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
+ cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
+ utiljson "k8s.io/apimachinery/pkg/util/json"
)
// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements
@@ -280,13 +282,20 @@ func (f FieldsV1) MarshalJSON() ([]byte, error) {
if f.Raw == nil {
return []byte("null"), nil
}
+ if f.getContentType() == fieldsV1InvalidOrValidCBORObject {
+ var u map[string]interface{}
+ if err := cbor.Unmarshal(f.Raw, &u); err != nil {
+ return nil, fmt.Errorf("metav1.FieldsV1 cbor invalid: %w", err)
+ }
+ return utiljson.Marshal(u)
+ }
return f.Raw, nil
}
// UnmarshalJSON implements json.Unmarshaler
func (f *FieldsV1) UnmarshalJSON(b []byte) error {
if f == nil {
- return errors.New("metav1.Fields: UnmarshalJSON on nil pointer")
+ return errors.New("metav1.FieldsV1: UnmarshalJSON on nil pointer")
}
if !bytes.Equal(b, []byte("null")) {
f.Raw = append(f.Raw[0:0], b...)
@@ -296,3 +305,75 @@ func (f *FieldsV1) UnmarshalJSON(b []byte) error {
var _ json.Marshaler = FieldsV1{}
var _ json.Unmarshaler = &FieldsV1{}
+
+func (f FieldsV1) MarshalCBOR() ([]byte, error) {
+ if f.Raw == nil {
+ return cbor.Marshal(nil)
+ }
+ if f.getContentType() == fieldsV1InvalidOrValidJSONObject {
+ var u map[string]interface{}
+ if err := utiljson.Unmarshal(f.Raw, &u); err != nil {
+ return nil, fmt.Errorf("metav1.FieldsV1 json invalid: %w", err)
+ }
+ return cbor.Marshal(u)
+ }
+ return f.Raw, nil
+}
+
+var cborNull = []byte{0xf6}
+
+func (f *FieldsV1) UnmarshalCBOR(b []byte) error {
+ if f == nil {
+ return errors.New("metav1.FieldsV1: UnmarshalCBOR on nil pointer")
+ }
+ if !bytes.Equal(b, cborNull) {
+ f.Raw = append(f.Raw[0:0], b...)
+ }
+ return nil
+}
+
+const (
+ // fieldsV1InvalidOrEmpty indicates that a FieldsV1 either contains no raw bytes or its raw
+ // bytes don't represent an allowable value in any supported encoding.
+ fieldsV1InvalidOrEmpty = iota
+
+ // fieldsV1InvalidOrValidJSONObject indicates that a FieldV1 either contains raw bytes that
+ // are a valid JSON encoding of an allowable value or don't represent an allowable value in
+ // any supported encoding.
+ fieldsV1InvalidOrValidJSONObject
+
+ // fieldsV1InvalidOrValidCBORObject indicates that a FieldV1 either contains raw bytes that
+ // are a valid CBOR encoding of an allowable value or don't represent an allowable value in
+ // any supported encoding.
+ fieldsV1InvalidOrValidCBORObject
+)
+
+// getContentType returns one of fieldsV1InvalidOrEmpty, fieldsV1InvalidOrValidJSONObject,
+// fieldsV1InvalidOrValidCBORObject based on the value of Raw.
+//
+// Raw can be encoded in JSON or CBOR and is only valid if it is empty, null, or an object (map)
+// value. It is invalid if it contains a JSON string, number, boolean, or array. If Raw is nonempty
+// and represents an allowable value, then the initial byte unambiguously distinguishes a
+// JSON-encoded value from a CBOR-encoded value.
+//
+// A valid JSON-encoded value can begin with any of the four JSON whitespace characters, the first
+// character 'n' of null, or '{' (0x09, 0x0a, 0x0d, 0x20, 0x6e, or 0x7b, respectively). A valid
+// CBOR-encoded value can begin with the null simple value, an initial byte with major type "map",
+// or, if a tag-enclosed map, an initial byte with major type "tag" (0xf6, 0xa0...0xbf, or
+// 0xc6...0xdb). The two sets of valid initial bytes don't intersect.
+func (f FieldsV1) getContentType() int {
+ if len(f.Raw) > 0 {
+ p := f.Raw[0]
+ switch p {
+ case 'n', '{', '\t', '\r', '\n', ' ':
+ return fieldsV1InvalidOrValidJSONObject
+ case 0xf6: // null
+ return fieldsV1InvalidOrValidCBORObject
+ default:
+ if p >= 0xa0 && p <= 0xbf /* map */ || p >= 0xc6 && p <= 0xdb /* tag */ {
+ return fieldsV1InvalidOrValidCBORObject
+ }
+ }
+ }
+ return fieldsV1InvalidOrEmpty
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
index 8eb37f4367..9f302b3f36 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
@@ -19,6 +19,8 @@ package v1
import (
"encoding/json"
"time"
+
+ cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
)
const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00"
@@ -129,6 +131,25 @@ func (t *MicroTime) UnmarshalJSON(b []byte) error {
return nil
}
+func (t *MicroTime) UnmarshalCBOR(b []byte) error {
+ var s *string
+ if err := cbor.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if s == nil {
+ t.Time = time.Time{}
+ return nil
+ }
+
+ parsed, err := time.Parse(RFC3339Micro, *s)
+ if err != nil {
+ return err
+ }
+
+ t.Time = parsed.Local()
+ return nil
+}
+
// UnmarshalQueryParameter converts from a URL query parameter value to an object
func (t *MicroTime) UnmarshalQueryParameter(str string) error {
if len(str) == 0 {
@@ -160,6 +181,13 @@ func (t MicroTime) MarshalJSON() ([]byte, error) {
return json.Marshal(t.UTC().Format(RFC3339Micro))
}
+func (t MicroTime) MarshalCBOR() ([]byte, error) {
+ if t.IsZero() {
+ return cbor.Marshal(nil)
+ }
+ return cbor.Marshal(t.UTC().Format(RFC3339Micro))
+}
+
// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go
index 3cf9d48e96..a5f437b4b3 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go
@@ -20,21 +20,22 @@ limitations under the License.
package v1
import (
+ "math/rand"
"time"
- fuzz "github.com/google/gofuzz"
+ "sigs.k8s.io/randfill"
)
-// Fuzz satisfies fuzz.Interface.
-func (t *MicroTime) Fuzz(c fuzz.Continue) {
+// Fuzz satisfies randfill.SimpleSelfFiller.
+func (t *MicroTime) RandFill(r *rand.Rand) {
if t == nil {
return
}
// Allow for about 1000 years of randomness. Accurate to a tenth of
// micro second. Leave off nanoseconds because JSON doesn't
// represent them so they can't round-trip properly.
- t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000))
+ t.Time = time.Unix(r.Int63n(1000*365*24*60*60), 1000*r.Int63n(1000000))
}
-// ensure MicroTime implements fuzz.Interface
-var _ fuzz.Interface = &MicroTime{}
+// ensure MicroTime implements randfill.Interface
+var _ randfill.SimpleSelfFiller = &MicroTime{}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
index 421770d432..0333cfdb33 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
@@ -19,6 +19,8 @@ package v1
import (
"encoding/json"
"time"
+
+ cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
)
// Time is a wrapper around time.Time which supports correct
@@ -116,6 +118,25 @@ func (t *Time) UnmarshalJSON(b []byte) error {
return nil
}
+func (t *Time) UnmarshalCBOR(b []byte) error {
+ var s *string
+ if err := cbor.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if s == nil {
+ t.Time = time.Time{}
+ return nil
+ }
+
+ parsed, err := time.Parse(time.RFC3339, *s)
+ if err != nil {
+ return err
+ }
+
+ t.Time = parsed.Local()
+ return nil
+}
+
// UnmarshalQueryParameter converts from a URL query parameter value to an object
func (t *Time) UnmarshalQueryParameter(str string) error {
if len(str) == 0 {
@@ -151,6 +172,14 @@ func (t Time) MarshalJSON() ([]byte, error) {
return buf, nil
}
+func (t Time) MarshalCBOR() ([]byte, error) {
+ if t.IsZero() {
+ return cbor.Marshal(nil)
+ }
+
+ return cbor.Marshal(t.UTC().Format(time.RFC3339))
+}
+
// ToUnstructured implements the value.UnstructuredConverter interface.
func (t Time) ToUnstructured() interface{} {
if t.IsZero() {
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go
index bf9e21b5bd..48fb978450 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go
@@ -20,21 +20,22 @@ limitations under the License.
package v1
import (
+ "math/rand"
"time"
- fuzz "github.com/google/gofuzz"
+ "sigs.k8s.io/randfill"
)
-// Fuzz satisfies fuzz.Interface.
-func (t *Time) Fuzz(c fuzz.Continue) {
+// Fuzz satisfies randfill.SimpleSelfFiller.
+func (t *Time) RandFill(r *rand.Rand) {
if t == nil {
return
}
// Allow for about 1000 years of randomness. Leave off nanoseconds
// because JSON doesn't represent them so they can't round-trip
// properly.
- t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0)
+ t.Time = time.Unix(r.Int63n(1000*365*24*60*60), 0)
}
-// ensure Time implements fuzz.Interface
-var _ fuzz.Interface = &Time{}
+// ensure Time implements randfill.SimpleSelfFiller
+var _ randfill.SimpleSelfFiller = &Time{}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
index 8a8ff70189..9970e877d0 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
@@ -185,7 +185,7 @@ type ObjectMeta struct {
// Null for lists.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
- CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"`
+ CreationTimestamp Time `json:"creationTimestamp,omitempty,omitzero" protobuf:"bytes,8,opt,name=creationTimestamp"`
// DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
// field is set by the server when a graceful deletion is requested by the user, and is not
@@ -236,6 +236,8 @@ type ObjectMeta struct {
// +optional
// +patchMergeKey=uid
// +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=uid
OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"`
// Must be empty before the object is deleted from the registry. Each entry
@@ -253,6 +255,7 @@ type ObjectMeta struct {
// are not vulnerable to ordering changes in the list.
// +optional
// +patchStrategy=merge
+ // +listType=set
Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"`
// Tombstone: ClusterName was a legacy field that was always cleared by
@@ -268,6 +271,7 @@ type ObjectMeta struct {
// workflow used when modifying the object.
//
// +optional
+ // +listType=atomic
ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"`
}
@@ -428,6 +432,15 @@ type ListOptions struct {
SendInitialEvents *bool `json:"sendInitialEvents,omitempty" protobuf:"varint,11,opt,name=sendInitialEvents"`
}
+const (
+ // InitialEventsAnnotationKey the name of the key
+ // under which an annotation marking the end of
+ // a watchlist stream is stored.
+ //
+ // The annotation is added to a "Bookmark" event.
+ InitialEventsAnnotationKey = "k8s.io/initial-events-end"
+)
+
// resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch
// may only be set if resourceVersion is also set.
//
@@ -531,7 +544,23 @@ type DeleteOptions struct {
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
+ // +listType=atomic
DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"`
+
+ // if set to true, it will trigger an unsafe deletion of the resource in
+ // case the normal deletion flow fails with a corrupt object error.
+ // A resource is considered corrupt if it can not be retrieved from
+ // the underlying storage successfully because of a) its data can
+ // not be transformed e.g. decryption failure, or b) it fails
+ // to decode into an object.
+ // NOTE: unsafe deletion ignores finalizer constraints, skips
+ // precondition checks, and removes the object from the storage.
+ // WARNING: This may potentially break the cluster if the workload
+ // associated with the resource being unsafe-deleted relies on normal
+ // deletion flow. Use only if you REALLY know what you are doing.
+ // The default value is false, and the user must opt in to enable it
+ // +optional
+ IgnoreStoreReadErrorWithClusterBreakingPotential *bool `json:"ignoreStoreReadErrorWithClusterBreakingPotential,omitempty" protobuf:"varint,6,opt,name=ignoreStoreReadErrorWithClusterBreakingPotential"`
}
const (
@@ -556,6 +585,7 @@ type CreateOptions struct {
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
+ // +listType=atomic
DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"`
// +k8s:deprecated=includeUninitialized,protobuf=2
@@ -600,6 +630,7 @@ type PatchOptions struct {
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
+ // +listType=atomic
DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"`
// Force is going to "force" Apply requests. It means user will
@@ -651,6 +682,7 @@ type ApplyOptions struct {
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
+ // +listType=atomic
DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"`
// Force is going to "force" Apply requests. It means user will
@@ -683,6 +715,7 @@ type UpdateOptions struct {
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
+ // +listType=atomic
DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"`
// fieldManager is a name associated with the actor or entity
@@ -784,6 +817,7 @@ type StatusDetails struct {
// The Causes array includes more details associated with the StatusReason
// failure. Not all StatusReasons may provide detailed causes.
// +optional
+ // +listType=atomic
Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"`
// If specified, the time in seconds before the operation should be retried. Some errors may indicate
// the client must take an alternate action - for those errors this field may indicate how long to wait
@@ -882,6 +916,22 @@ const (
// Status code 500
StatusReasonServerTimeout StatusReason = "ServerTimeout"
+ // StatusReasonStoreReadError means that the server encountered an error while
+ // retrieving resources from the backend object store.
+ // This may be due to backend database error, or because processing of the read
+ // resource failed.
+ // Details:
+ // "kind" string - the kind attribute of the resource being acted on.
+ // "name" string - the prefix where the reading error(s) occurred
+ // "causes" []StatusCause
+ // - (optional):
+ // - "type" CauseType - CauseTypeUnexpectedServerResponse
+ // - "message" string - the error message from the store backend
+ // - "field" string - the full path with the key of the resource that failed reading
+ //
+ // Status code 500
+ StatusReasonStoreReadError StatusReason = "StorageReadError"
+
// StatusReasonTimeout means that the request could not be completed within the given time.
// Clients can get this response only when they specified a timeout param in the request,
// or if the server cannot complete the operation within a reasonable amount of time.
@@ -1047,6 +1097,7 @@ type List struct {
type APIVersions struct {
TypeMeta `json:",inline"`
// versions are the api versions that are available.
+ // +listType=atomic
Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"`
// a map of client CIDR to server address that is serving this group.
// This is to help clients reach servers in the most network-efficient way possible.
@@ -1055,6 +1106,7 @@ type APIVersions struct {
// The server returns only those CIDRs that it thinks that the client can match.
// For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+ // +listType=atomic
ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"`
}
@@ -1065,6 +1117,7 @@ type APIVersions struct {
type APIGroupList struct {
TypeMeta `json:",inline"`
// groups is a list of APIGroup.
+ // +listType=atomic
Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"`
}
@@ -1077,6 +1130,7 @@ type APIGroup struct {
// name is the name of the group.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// versions are the versions supported in this group.
+ // +listType=atomic
Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"`
// preferredVersion is the version preferred by the API server, which
// probably is the storage version.
@@ -1090,6 +1144,7 @@ type APIGroup struct {
// For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
// +optional
+ // +listType=atomic
ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs,omitempty" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"`
}
@@ -1134,8 +1189,10 @@ type APIResource struct {
// update, patch, delete, deletecollection, and proxy)
Verbs Verbs `json:"verbs" protobuf:"bytes,4,opt,name=verbs"`
// shortNames is a list of suggested short names of the resource.
+ // +listType=atomic
ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"`
// categories is a list of the grouped resources this resource belongs to (e.g. 'all')
+ // +listType=atomic
Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"`
// The hash value of the storage version, the version this resource is
// converted to when written to the data store. Value must be treated
@@ -1168,6 +1225,7 @@ type APIResourceList struct {
// groupVersion is the group and version this APIResourceList is for.
GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"`
// resources contains the name of the resources and if they are namespaced.
+ // +listType=atomic
APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"`
}
@@ -1175,6 +1233,7 @@ type APIResourceList struct {
// For example: "/healthz", "/apis".
type RootPaths struct {
// paths are the paths available at root.
+ // +listType=atomic
Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"`
}
@@ -1218,6 +1277,7 @@ type LabelSelector struct {
MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"`
// matchExpressions is a list of label selector requirements. The requirements are ANDed.
// +optional
+ // +listType=atomic
MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"`
}
@@ -1234,6 +1294,7 @@ type LabelSelectorRequirement struct {
// the values array must be empty. This array is replaced during a strategic
// merge patch.
// +optional
+ // +listType=atomic
Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
}
@@ -1247,6 +1308,33 @@ const (
LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
)
+// FieldSelectorRequirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+type FieldSelectorRequirement struct {
+ // key is the field selector key that the requirement applies to.
+ Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+ // operator represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist.
+ // The list of operators may grow in the future.
+ Operator FieldSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=FieldSelectorOperator"`
+ // values is an array of string values.
+ // If the operator is In or NotIn, the values array must be non-empty.
+ // If the operator is Exists or DoesNotExist, the values array must be empty.
+ // +optional
+ // +listType=atomic
+ Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
+}
+
+// A field selector operator is the set of operators that can be used in a selector requirement.
+type FieldSelectorOperator string
+
+const (
+ FieldSelectorOpIn FieldSelectorOperator = "In"
+ FieldSelectorOpNotIn FieldSelectorOperator = "NotIn"
+ FieldSelectorOpExists FieldSelectorOperator = "Exists"
+ FieldSelectorOpDoesNotExist FieldSelectorOperator = "DoesNotExist"
+)
+
// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource
// that the fieldset applies to.
type ManagedFieldsEntry struct {
@@ -1335,8 +1423,10 @@ type Table struct {
// columnDefinitions describes each column in the returned items array. The number of cells per row
// will always match the number of column definitions.
+ // +listType=atomic
ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"`
// rows is the list of items in the table.
+ // +listType=atomic
Rows []TableRow `json:"rows"`
}
@@ -1369,12 +1459,14 @@ type TableRow struct {
// cells will be as wide as the column definitions array and may contain strings, numbers (float64 or
// int64), booleans, simple maps, lists, or null. See the type field of the column definition for a
// more detailed description.
+ // +listType=atomic
Cells []interface{} `json:"cells"`
// conditions describe additional status of a row that are relevant for a human user. These conditions
// apply to the row, not to the object, and will be specific to table output. The only defined
// condition type is 'Completed', for a row that indicates a resource that has run to completion and
// can be given less visual priority.
// +optional
+ // +listType=atomic
Conditions []TableRowCondition `json:"conditions,omitempty"`
// This field contains the requested additional information about each object based on the includeObject
// policy when requesting the Table. If "None", this field is empty, if "Object" this will be the
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
index b736e83712..405496d3df 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
@@ -129,12 +129,24 @@ var map_DeleteOptions = map[string]string{
"orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
"propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
"dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "ignoreStoreReadErrorWithClusterBreakingPotential": "if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it",
}
func (DeleteOptions) SwaggerDoc() map[string]string {
return map_DeleteOptions
}
+var map_FieldSelectorRequirement = map[string]string{
+ "": "FieldSelectorRequirement is a selector that contains values, a key, and an operator that relates the key and values.",
+ "key": "key is the field selector key that the requirement applies to.",
+ "operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. The list of operators may grow in the future.",
+ "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.",
+}
+
+func (FieldSelectorRequirement) SwaggerDoc() map[string]string {
+ return map_FieldSelectorRequirement
+}
+
var map_FieldsV1 = map[string]string{
"": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff",
}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
index 0f58d66c09..59f43b7bff 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
@@ -20,6 +20,7 @@ import (
gojson "encoding/json"
"fmt"
"io"
+ "math/big"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -125,6 +126,29 @@ func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, err
return i, true, nil
}
+// NestedNumberAsFloat64 returns the float64 value of a nested field. If the field's value is a
+// float64, it is returned. If the field's value is an int64 that can be losslessly converted to
+// float64, it will be converted and returned. Returns false if value is not found and an error if
+// not a float64 or an int64 that can be accurately represented as a float64.
+func NestedNumberAsFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) {
+ val, found, err := NestedFieldNoCopy(obj, fields...)
+ if !found || err != nil {
+ return 0, found, err
+ }
+ switch x := val.(type) {
+ case int64:
+ f, accuracy := big.NewInt(x).Float64()
+ if accuracy != big.Exact {
+ return 0, false, fmt.Errorf("%v accessor error: int64 value %v cannot be losslessly converted to float64", jsonPath(fields), x)
+ }
+ return f, true, nil
+ case float64:
+ return x, true, nil
+ default:
+ return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64 or int64", jsonPath(fields), val, val)
+ }
+}
+
// NestedStringSlice returns a copy of []string value of a nested field.
// Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice.
func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) {
@@ -164,7 +188,7 @@ func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, b
// NestedStringMap returns a copy of map[string]string value of a nested field.
// Returns false if value is not found and an error if not a map[string]interface{} or contains non-string values in the map.
func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) {
- m, found, err := nestedMapNoCopy(obj, fields...)
+ m, found, err := nestedMapNoCopy(obj, false, fields...)
if !found || err != nil {
return nil, found, err
}
@@ -179,10 +203,32 @@ func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]s
return strMap, true, nil
}
+// NestedNullCoercingStringMap returns a copy of map[string]string value of a nested field.
+// Returns `nil, true, nil` if the value exists and is explicitly null.
+// Returns `nil, false, err` if the value is not a map or a null value, or is a map and contains non-string non-null values.
+// Null values in the map are coerced to "" to match json decoding behavior.
+func NestedNullCoercingStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) {
+ m, found, err := nestedMapNoCopy(obj, true, fields...)
+ if !found || err != nil || m == nil {
+ return nil, found, err
+ }
+ strMap := make(map[string]string, len(m))
+ for k, v := range m {
+ if str, ok := v.(string); ok {
+ strMap[k] = str
+ } else if v == nil {
+ strMap[k] = ""
+ } else {
+ return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the map under key %q: %v is of the type %T, expected string", jsonPath(fields), k, v, v)
+ }
+ }
+ return strMap, true, nil
+}
+
// NestedMap returns a deep copy of map[string]interface{} value of a nested field.
// Returns false if value is not found and an error if not a map[string]interface{}.
func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
- m, found, err := nestedMapNoCopy(obj, fields...)
+ m, found, err := nestedMapNoCopy(obj, false, fields...)
if !found || err != nil {
return nil, found, err
}
@@ -191,11 +237,14 @@ func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interfa
// nestedMapNoCopy returns a map[string]interface{} value of a nested field.
// Returns false if value is not found and an error if not a map[string]interface{}.
-func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
+func nestedMapNoCopy(obj map[string]interface{}, tolerateNil bool, fields ...string) (map[string]interface{}, bool, error) {
val, found, err := NestedFieldNoCopy(obj, fields...)
if !found || err != nil {
return nil, found, err
}
+ if val == nil && tolerateNil {
+ return nil, true, nil
+ }
m, ok := val.(map[string]interface{})
if !ok {
return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields), val, val)
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
index 40d289f375..fdb0c86297 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
@@ -397,7 +397,7 @@ func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds
}
func (u *Unstructured) GetLabels() map[string]string {
- m, _, _ := NestedStringMap(u.Object, "metadata", "labels")
+ m, _, _ := NestedNullCoercingStringMap(u.Object, "metadata", "labels")
return m
}
@@ -410,7 +410,7 @@ func (u *Unstructured) SetLabels(labels map[string]string) {
}
func (u *Unstructured) GetAnnotations() map[string]string {
- m, _, _ := NestedStringMap(u.Object, "metadata", "annotations")
+ m, _, _ := NestedNullCoercingStringMap(u.Object, "metadata", "annotations")
return m
}
@@ -450,10 +450,14 @@ func (u *Unstructured) SetFinalizers(finalizers []string) {
}
func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry {
- items, found, err := NestedSlice(u.Object, "metadata", "managedFields")
+ v, found, err := NestedFieldNoCopy(u.Object, "metadata", "managedFields")
if !found || err != nil {
return nil
}
+ items, ok := v.([]interface{})
+ if !ok {
+ return nil
+ }
managedFields := []metav1.ManagedFieldsEntry{}
for _, item := range items {
m, ok := item.(map[string]interface{})
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go
index cff91d3da6..f8f5ec8560 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go
@@ -50,11 +50,11 @@ func (s unstructuredNegotiatedSerializer) SupportedMediaTypes() []runtime.Serial
MediaTypeType: "application",
MediaTypeSubType: "json",
EncodesAsText: true,
- Serializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, false),
- PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, true),
+ Serializer: json.NewSerializerWithOptions(json.DefaultMetaFactory, s.creator, s.typer, json.SerializerOptions{}),
+ PrettySerializer: json.NewSerializerWithOptions(json.DefaultMetaFactory, s.creator, s.typer, json.SerializerOptions{Pretty: true}),
StreamSerializer: &runtime.StreamSerializerInfo{
EncodesAsText: true,
- Serializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, false),
+ Serializer: json.NewSerializerWithOptions(json.DefaultMetaFactory, s.creator, s.typer, json.SerializerOptions{}),
Framer: json.Framer,
},
},
@@ -63,7 +63,7 @@ func (s unstructuredNegotiatedSerializer) SupportedMediaTypes() []runtime.Serial
MediaTypeType: "application",
MediaTypeSubType: "yaml",
EncodesAsText: true,
- Serializer: json.NewYAMLSerializer(json.DefaultMetaFactory, s.creator, s.typer),
+ Serializer: json.NewSerializerWithOptions(json.DefaultMetaFactory, s.creator, s.typer, json.SerializerOptions{Yaml: true}),
},
}
}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
index a0f709ad86..d0ca20013a 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
@@ -26,12 +26,18 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
+
+ "k8s.io/utils/ptr"
)
// LabelSelectorValidationOptions is a struct that can be passed to ValidateLabelSelector to record the validate options
type LabelSelectorValidationOptions struct {
// Allow invalid label value in selector
AllowInvalidLabelValueInSelector bool
+
+ // Allows an operator that is not interpretable to pass validation. This is useful for cases where a broader check
+ // can be performed, as in a *SubjectAccessReview
+ AllowUnknownOperatorInRequirement bool
}
// LabelSelectorHasInvalidLabelValue returns true if the given selector contains an invalid label value in a match expression.
@@ -79,7 +85,9 @@ func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, opts L
allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
}
default:
- allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator"))
+ if !opts.AllowUnknownOperatorInRequirement {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator"))
+ }
}
allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...)
if !opts.AllowInvalidLabelValueInSelector {
@@ -96,7 +104,7 @@ func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, opts L
func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, msg := range validation.IsQualifiedName(labelName) {
- allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg))
+ allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg).WithOrigin("format=k8s-label-key"))
}
return allErrs
}
@@ -107,12 +115,45 @@ func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorLi
for k, v := range labels {
allErrs = append(allErrs, ValidateLabelName(k, fldPath)...)
for _, msg := range validation.IsValidLabelValue(v) {
- allErrs = append(allErrs, field.Invalid(fldPath, v, msg))
+ allErrs = append(allErrs, field.Invalid(fldPath, v, msg).WithOrigin("format=k8s-label-value"))
}
}
return allErrs
}
+// FieldSelectorValidationOptions is a struct that can be passed to ValidateFieldSelectorRequirement to record the validate options
+type FieldSelectorValidationOptions struct {
+ // Allows an operator that is not interpretable to pass validation. This is useful for cases where a broader check
+ // can be performed, as in a *SubjectAccessReview
+ AllowUnknownOperatorInRequirement bool
+}
+
+// ValidateFieldSelectorRequirement validates the requirement according to the opts and returns any validation errors.
+func ValidateFieldSelectorRequirement(requirement metav1.FieldSelectorRequirement, opts FieldSelectorValidationOptions, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(requirement.Key) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("key"), "must be specified"))
+ }
+
+ switch requirement.Operator {
+ case metav1.FieldSelectorOpIn, metav1.FieldSelectorOpNotIn:
+ if len(requirement.Values) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'"))
+ }
+ case metav1.FieldSelectorOpExists, metav1.FieldSelectorOpDoesNotExist:
+ if len(requirement.Values) > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
+ }
+ default:
+ if !opts.AllowUnknownOperatorInRequirement {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), requirement.Operator, "not a valid selector operator"))
+ }
+ }
+
+ return allErrs
+}
+
func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList {
allErrs := field.ErrorList{}
//lint:file-ignore SA1019 Keep validation for deprecated OrphanDependents option until it's being removed
@@ -126,6 +167,7 @@ func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList {
allErrs = append(allErrs, field.NotSupported(field.NewPath("propagationPolicy"), options.PropagationPolicy, []string{string(metav1.DeletePropagationForeground), string(metav1.DeletePropagationBackground), string(metav1.DeletePropagationOrphan), "nil"}))
}
allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...)
+ allErrs = append(allErrs, ValidateIgnoreStoreReadError(field.NewPath("ignoreStoreReadErrorWithClusterBreakingPotential"), options)...)
return allErrs
}
@@ -147,15 +189,16 @@ func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList {
func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchType) field.ErrorList {
allErrs := field.ErrorList{}
- if patchType != types.ApplyPatchType {
- if options.Force != nil {
- allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch"))
- }
- } else {
+ switch patchType {
+ case types.ApplyYAMLPatchType, types.ApplyCBORPatchType:
if options.FieldManager == "" {
// This field is defaulted to "kubectl" by kubectl, but HAS TO be explicitly set by controllers.
allErrs = append(allErrs, field.Required(field.NewPath("fieldManager"), "is required for apply patch"))
}
+ default:
+ if options.Force != nil {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch"))
+ }
}
allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...)
allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...)
@@ -173,7 +216,7 @@ func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorL
// considered as not set and is defaulted by the rest of the process
// (unless apply is used, in which case it is required).
if len(fieldManager) > FieldManagerMaxLength {
- allErrs = append(allErrs, field.TooLong(fldPath, fieldManager, FieldManagerMaxLength))
+ allErrs = append(allErrs, field.TooLong(fldPath, "" /*unused*/, FieldManagerMaxLength))
}
// Verify that all characters are printable.
for i, r := range fieldManager {
@@ -238,7 +281,7 @@ func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *fiel
allErrs = append(allErrs, ValidateFieldManager(fields.Manager, fldPath.Child("manager"))...)
if len(fields.Subresource) > MaxSubresourceNameLength {
- allErrs = append(allErrs, field.TooLong(fldPath.Child("subresource"), fields.Subresource, MaxSubresourceNameLength))
+ allErrs = append(allErrs, field.TooLong(fldPath.Child("subresource"), "" /*unused*/, MaxSubresourceNameLength))
}
}
return allErrs
@@ -285,22 +328,22 @@ func ValidateCondition(condition metav1.Condition, fldPath *field.Path) field.Er
}
if condition.LastTransitionTime.IsZero() {
- allErrs = append(allErrs, field.Required(fldPath.Child("lastTransitionTime"), "must be set"))
+ allErrs = append(allErrs, field.Required(fldPath.Child("lastTransitionTime"), ""))
}
if len(condition.Reason) == 0 {
- allErrs = append(allErrs, field.Required(fldPath.Child("reason"), "must be set"))
+ allErrs = append(allErrs, field.Required(fldPath.Child("reason"), ""))
} else {
for _, currErr := range isValidConditionReason(condition.Reason) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("reason"), condition.Reason, currErr))
}
if len(condition.Reason) > maxReasonLen {
- allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), condition.Reason, maxReasonLen))
+ allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), "" /*unused*/, maxReasonLen))
}
}
if len(condition.Message) > maxMessageLen {
- allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), condition.Message, maxMessageLen))
+ allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), "" /*unused*/, maxMessageLen))
}
return allErrs
@@ -318,3 +361,31 @@ func isValidConditionReason(value string) []string {
}
return nil
}
+
+// ValidateIgnoreStoreReadError validates that delete options are valid when
+// ignoreStoreReadErrorWithClusterBreakingPotential is enabled
+func ValidateIgnoreStoreReadError(fldPath *field.Path, options *metav1.DeleteOptions) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if enabled := ptr.Deref[bool](options.IgnoreStoreReadErrorWithClusterBreakingPotential, false); !enabled {
+ return allErrs
+ }
+
+ if len(options.DryRun) > 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .dryRun"))
+ }
+ if options.PropagationPolicy != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .propagationPolicy"))
+ }
+ //nolint:staticcheck // Keep validation for deprecated OrphanDependents option until it's being removed
+ if options.OrphanDependents != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .orphanDependents"))
+ }
+ if options.GracePeriodSeconds != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .gracePeriodSeconds"))
+ }
+ if options.Preconditions != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .preconditions"))
+ }
+
+ return allErrs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
index afe01ed5a4..82e2722404 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
@@ -339,6 +339,13 @@ func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptio
} else {
out.DryRun = nil
}
+ if values, ok := map[string][]string(*in)["ignoreStoreReadErrorWithClusterBreakingPotential"]; ok && len(values) > 0 {
+ if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.IgnoreStoreReadErrorWithClusterBreakingPotential, s); err != nil {
+ return err
+ }
+ } else {
+ out.IgnoreStoreReadErrorWithClusterBreakingPotential = nil
+ }
return nil
}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
index 7d29c504ab..6b0d0dfee9 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
@@ -290,6 +290,11 @@ func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
+ in, out := &in.IgnoreStoreReadErrorWithClusterBreakingPotential, &out.IgnoreStoreReadErrorWithClusterBreakingPotential
+ *out = new(bool)
+ **out = **in
+ }
return
}
@@ -327,6 +332,27 @@ func (in *Duration) DeepCopy() *Duration {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FieldSelectorRequirement) DeepCopyInto(out *FieldSelectorRequirement) {
+ *out = *in
+ if in.Values != nil {
+ in, out := &in.Values, &out.Values
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorRequirement.
+func (in *FieldSelectorRequirement) DeepCopy() *FieldSelectorRequirement {
+ if in == nil {
+ return nil
+ }
+ out := new(FieldSelectorRequirement)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FieldsV1) DeepCopyInto(out *FieldsV1) {
*out = *in
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.model_name.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.model_name.go
new file mode 100644
index 0000000000..fd6e876ece
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.model_name.go
@@ -0,0 +1,267 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIGroup) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIGroupList) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.APIGroupList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIResource) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.APIResource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIResourceList) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIVersions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.APIVersions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ApplyOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.ApplyOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Condition) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Condition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CreateOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.CreateOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeleteOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Duration) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Duration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FieldSelectorRequirement) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FieldsV1) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GetOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.GetOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupKind) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.GroupKind"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupResource) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.GroupResource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupVersion) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersion"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupVersionForDiscovery) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupVersionKind) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionKind"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupVersionResource) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionResource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in InternalEvent) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.InternalEvent"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LabelSelector) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LabelSelectorRequirement) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in List) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.List"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ListMeta) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ListOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.ListOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ManagedFieldsEntry) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MicroTime) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ObjectMeta) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in OwnerReference) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PartialObjectMetadata) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PartialObjectMetadataList) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.PartialObjectMetadataList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Patch) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PatchOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.PatchOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Preconditions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RootPaths) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.RootPaths"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServerAddressByClientCIDR) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Status) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatusCause) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatusDetails) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Table) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Table"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TableColumnDefinition) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.TableColumnDefinition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TableOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.TableOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TableRow) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.TableRow"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TableRowCondition) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.TableRowCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Time) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Timestamp) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Timestamp"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TypeMeta) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.TypeMeta"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in UpdateOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.UpdateOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in WatchEvent) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go
index 20c9d2ec73..159ca0573f 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go
@@ -17,7 +17,8 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.apis.meta.v1beta1
// +groupName=meta.k8s.io
-package v1beta1 // import "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go
index a2abc67c15..3c763898e7 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go
@@ -15,7 +15,7 @@ limitations under the License.
*/
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
+// source: k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
package v1beta1
@@ -24,85 +24,14 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
-func (*PartialObjectMetadataList) ProtoMessage() {}
-func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
- return fileDescriptor_90ec10f86b91f9a8, []int{0}
-}
-func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PartialObjectMetadataList.Merge(m, src)
-}
-func (m *PartialObjectMetadataList) XXX_Size() int {
- return m.Size()
-}
-func (m *PartialObjectMetadataList) XXX_DiscardUnknown() {
- xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_90ec10f86b91f9a8)
-}
-
-var fileDescriptor_90ec10f86b91f9a8 = []byte{
- // 317 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x4b, 0xf3, 0x30,
- 0x1c, 0xc6, 0x9b, 0xf7, 0x65, 0x30, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xdb, 0xc1,
- 0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0x32, 0xd9, 0x51, 0x3c, 0x98, 0x76, 0x7f, 0xbb, 0x58,
- 0xd3, 0x94, 0xe4, 0xdf, 0x81, 0x37, 0x3f, 0x82, 0x1f, 0x6b, 0xc7, 0x1d, 0x07, 0xc2, 0x70, 0xf5,
- 0x8b, 0x48, 0xda, 0x2a, 0x32, 0x14, 0x7a, 0xeb, 0xf3, 0x94, 0xdf, 0x2f, 0x4f, 0x20, 0xfe, 0x2c,
- 0x3e, 0xb7, 0x4c, 0x6a, 0x1e, 0x67, 0x01, 0x98, 0x04, 0x10, 0x2c, 0x5f, 0x42, 0x32, 0xd7, 0x86,
- 0x57, 0x3f, 0x44, 0x2a, 0x95, 0x08, 0x17, 0x32, 0x01, 0xf3, 0xcc, 0xd3, 0x38, 0x72, 0x85, 0xe5,
- 0x0a, 0x50, 0xf0, 0xe5, 0x28, 0x00, 0x14, 0x23, 0x1e, 0x41, 0x02, 0x46, 0x20, 0xcc, 0x59, 0x6a,
- 0x34, 0xea, 0xf6, 0xb0, 0x44, 0xd9, 0x4f, 0x94, 0xa5, 0x71, 0xe4, 0x0a, 0xcb, 0x1c, 0xca, 0x2a,
- 0xb4, 0x7b, 0x12, 0x49, 0x5c, 0x64, 0x01, 0x0b, 0xb5, 0xe2, 0x91, 0x8e, 0x34, 0x2f, 0x0c, 0x41,
- 0xf6, 0x50, 0xa4, 0x22, 0x14, 0x5f, 0xa5, 0xb9, 0x7b, 0x5a, 0x67, 0xd4, 0xfe, 0x9e, 0xee, 0xd9,
- 0x5f, 0x94, 0xc9, 0x12, 0x94, 0x0a, 0xb8, 0x0d, 0x17, 0xa0, 0xc4, 0x3e, 0x77, 0xfc, 0x46, 0xfc,
- 0xa3, 0x1b, 0x61, 0x50, 0x8a, 0xa7, 0x69, 0xf0, 0x08, 0x21, 0x5e, 0x03, 0x8a, 0xb9, 0x40, 0x71,
- 0x25, 0x2d, 0xb6, 0xef, 0xfc, 0xa6, 0xaa, 0x72, 0xe7, 0x5f, 0x9f, 0x0c, 0x5a, 0x63, 0xc6, 0xea,
- 0x5c, 0x9c, 0x39, 0xda, 0x99, 0x26, 0x87, 0xab, 0x6d, 0xcf, 0xcb, 0xb7, 0xbd, 0xe6, 0x57, 0x33,
- 0xfb, 0x36, 0xb6, 0xef, 0xfd, 0x86, 0x44, 0x50, 0xb6, 0x43, 0xfa, 0xff, 0x07, 0xad, 0xf1, 0x45,
- 0x3d, 0xf5, 0xaf, 0x6b, 0x27, 0x07, 0xd5, 0x39, 0x8d, 0x4b, 0x67, 0x9c, 0x95, 0xe2, 0xc9, 0x74,
- 0xb5, 0xa3, 0xde, 0x7a, 0x47, 0xbd, 0xcd, 0x8e, 0x7a, 0x2f, 0x39, 0x25, 0xab, 0x9c, 0x92, 0x75,
- 0x4e, 0xc9, 0x26, 0xa7, 0xe4, 0x3d, 0xa7, 0xe4, 0xf5, 0x83, 0x7a, 0xb7, 0xc3, 0xda, 0xcf, 0xe0,
- 0x33, 0x00, 0x00, 0xff, 0xff, 0x30, 0x97, 0x8b, 0x11, 0x4b, 0x02, 0x00, 0x00,
-}
+func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
index d14d42591b..fcec553542 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
@@ -33,9 +33,9 @@ message PartialObjectMetadataList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
- optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2;
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2;
// items contains each of the included items.
- repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1;
+ repeated .k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1;
}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.protomessage.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 0000000000..a782b1d8f0
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,24 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*PartialObjectMetadataList) ProtoMessage() {}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go
index f16170a37b..68f261cc14 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go
@@ -46,6 +46,7 @@ type ConditionStatus = v1.ConditionStatus
type IncludeObjectPolicy = v1.IncludeObjectPolicy
// TableOptions are used when a Table is requested by the caller.
+// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TableOptions = v1.TableOptions
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.model_name.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.model_name.go
new file mode 100644
index 0000000000..9c3600119a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,27 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PartialObjectMetadataList) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList"
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go
index 7415d81646..0c46ef2d16 100644
--- a/vendor/k8s.io/apimachinery/pkg/conversion/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// but for the fields which did not change, copying is automated. This makes it
// easy to modify the structures you use in memory without affecting the format
// you store on disk or respond to in your external API calls.
-package conversion // import "k8s.io/apimachinery/pkg/conversion"
+package conversion
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go
index 7b763de6f0..4c1002a4c1 100644
--- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
// Package queryparams provides conversion from versioned
// runtime objects to URL query values
-package queryparams // import "k8s.io/apimachinery/pkg/conversion/queryparams"
+package queryparams
diff --git a/vendor/k8s.io/apimachinery/pkg/fields/doc.go b/vendor/k8s.io/apimachinery/pkg/fields/doc.go
index c39b8039ae..49059e2635 100644
--- a/vendor/k8s.io/apimachinery/pkg/fields/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/fields/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
// Package fields implements a simple field system, parsing and matching
// selectors with sets of fields.
-package fields // import "k8s.io/apimachinery/pkg/fields"
+package fields
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/doc.go b/vendor/k8s.io/apimachinery/pkg/labels/doc.go
index 82de0051bd..35ba788094 100644
--- a/vendor/k8s.io/apimachinery/pkg/labels/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/labels/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
// Package labels implements a simple label system, parsing and matching
// selectors with sets of labels.
-package labels // import "k8s.io/apimachinery/pkg/labels"
+package labels
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go
index 19d823cef7..670b010ba5 100644
--- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go
+++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go
@@ -31,6 +31,9 @@ type Labels interface {
// Get returns the value for the provided label.
Get(label string) (value string)
+
+ // Lookup returns the value for the provided label if it exists and whether the provided label exist
+ Lookup(label string) (value string, exists bool)
}
// Set is a map of label:value. It implements Labels.
@@ -59,6 +62,12 @@ func (ls Set) Get(label string) string {
return ls[label]
}
+// Lookup returns the value for the provided label if it exists and whether the provided label exist
+func (ls Set) Lookup(label string) (string, bool) {
+ val, exists := ls[label]
+ return val, exists
+}
+
// AsSelector converts labels into a selectors. It does not
// perform any validation, which means the server will reject
// the request if the Set contains invalid values.
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
index 5e60142405..031dcd21be 100644
--- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go
+++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
@@ -18,16 +18,18 @@ package labels
import (
"fmt"
+ "slices"
"sort"
"strconv"
"strings"
+ "k8s.io/klog/v2"
+
+ "k8s.io/apimachinery/pkg/api/validate/content"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
- "k8s.io/klog/v2"
- stringslices "k8s.io/utils/strings/slices"
)
var (
@@ -45,6 +47,19 @@ var (
// Requirements is AND of all requirements.
type Requirements []Requirement
+func (r Requirements) String() string {
+ var sb strings.Builder
+
+ for i, requirement := range r {
+ if i > 0 {
+ sb.WriteString(", ")
+ }
+ sb.WriteString(requirement.String())
+ }
+
+ return sb.String()
+}
+
// Selector represents a label selector.
type Selector interface {
// Matches returns true if this selector matches the given set of labels.
@@ -102,6 +117,15 @@ func Nothing() Selector {
return sharedNothingSelector
}
+// MatchesNothing only returns true for selectors which are definitively determined to match no objects.
+// This currently only detects the `labels.Nothing()` selector, but may change over time to detect more selectors that match no objects.
+//
+// Note: The current implementation does not check for selector conflict scenarios (e.g., a=a,a!=a).
+// Support for detecting such cases can be added in the future.
+func MatchesNothing(selector Selector) bool {
+ return selector == sharedNothingSelector
+}
+
// NewSelector returns a nil selector
func NewSelector() Selector {
return internalSelector(nil)
@@ -223,26 +247,29 @@ func (r *Requirement) hasValue(value string) bool {
func (r *Requirement) Matches(ls Labels) bool {
switch r.operator {
case selection.In, selection.Equals, selection.DoubleEquals:
- if !ls.Has(r.key) {
+ val, exists := ls.Lookup(r.key)
+ if !exists {
return false
}
- return r.hasValue(ls.Get(r.key))
+ return r.hasValue(val)
case selection.NotIn, selection.NotEquals:
- if !ls.Has(r.key) {
+ val, exists := ls.Lookup(r.key)
+ if !exists {
return true
}
- return !r.hasValue(ls.Get(r.key))
+ return !r.hasValue(val)
case selection.Exists:
return ls.Has(r.key)
case selection.DoesNotExist:
return !ls.Has(r.key)
case selection.GreaterThan, selection.LessThan:
- if !ls.Has(r.key) {
+ val, exists := ls.Lookup(r.key)
+ if !exists {
return false
}
- lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64)
+ lsValue, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err)
+ klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", val, ls, err)
return false
}
@@ -285,6 +312,13 @@ func (r *Requirement) Values() sets.String {
return ret
}
+// ValuesUnsorted returns a copy of requirement values as passed to NewRequirement without sorting.
+func (r *Requirement) ValuesUnsorted() []string {
+ ret := make([]string, 0, len(r.strValues))
+ ret = append(ret, r.strValues...)
+ return ret
+}
+
// Equal checks the equality of requirement.
func (r Requirement) Equal(x Requirement) bool {
if r.key != x.key {
@@ -293,7 +327,7 @@ func (r Requirement) Equal(x Requirement) bool {
if r.operator != x.operator {
return false
}
- return stringslices.Equal(r.strValues, x.strValues)
+ return slices.Equal(r.strValues, x.strValues)
}
// Empty returns true if the internalSelector doesn't restrict selection space
@@ -815,7 +849,6 @@ func (p *Parser) parseIdentifiersList() (sets.String, error) {
return s, nil
}
if tok2 == CommaToken {
- p.consume(Values)
s.Insert("") // to handle ,, Double "" removed by StringSet
}
default: // it can be operator
@@ -894,7 +927,7 @@ func parse(selector string, path *field.Path) (internalSelector, error) {
}
func validateLabelKey(k string, path *field.Path) *field.Error {
- if errs := validation.IsQualifiedName(k); len(errs) != 0 {
+ if errs := content.IsLabelKey(k); len(errs) != 0 {
return field.Invalid(path, k, strings.Join(errs, "; "))
}
return nil
@@ -976,7 +1009,8 @@ type ValidatedSetSelector Set
func (s ValidatedSetSelector) Matches(labels Labels) bool {
for k, v := range s {
- if !labels.Has(k) || v != labels.Get(k) {
+ val, exists := labels.Lookup(k)
+ if !exists || v != val {
return false
}
}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
index 62eb27afc1..b1a300959b 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
@@ -29,10 +29,11 @@ import (
"sync/atomic"
"time"
+ "sigs.k8s.io/structured-merge-diff/v6/value"
+
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/util/json"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "sigs.k8s.io/structured-merge-diff/v4/value"
"k8s.io/klog/v2"
)
@@ -53,6 +54,7 @@ type fieldInfo struct {
name string
nameValue reflect.Value
omitempty bool
+ omitzero func(dv reflect.Value) bool
}
type fieldsCacheMap map[structField]*fieldInfo
@@ -376,19 +378,24 @@ func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo {
typeField := structType.Field(field)
jsonTag := typeField.Tag.Get("json")
if len(jsonTag) == 0 {
- // Make the first character lowercase.
- if typeField.Name == "" {
+ if !typeField.Anonymous {
+ // match stdlib behavior for naming fields that don't specify a json tag name
info.name = typeField.Name
- } else {
- info.name = strings.ToLower(typeField.Name[:1]) + typeField.Name[1:]
}
} else {
items := strings.Split(jsonTag, ",")
info.name = items[0]
+ if len(info.name) == 0 && !typeField.Anonymous {
+ // match stdlib behavior for naming fields that don't specify a json tag name
+ info.name = typeField.Name
+ }
+
for i := range items {
- if items[i] == "omitempty" {
+ if i > 0 && items[i] == "omitempty" {
info.omitempty = true
- break
+ }
+ if i > 0 && items[i] == "omitzero" {
+ info.omitzero = value.OmitZeroFunc(typeField.Type)
}
}
}
@@ -775,7 +782,7 @@ func pointerToUnstructured(sv, dv reflect.Value) error {
return toUnstructured(sv.Elem(), dv)
}
-func isZero(v reflect.Value) bool {
+func isEmpty(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.String:
return v.Len() == 0
@@ -816,10 +823,14 @@ func structToUnstructured(sv, dv reflect.Value) error {
// This field should be skipped.
continue
}
- if fieldInfo.omitempty && isZero(fv) {
+ if fieldInfo.omitempty && isEmpty(fv) {
// omitempty fields should be ignored.
continue
}
+ if fieldInfo.omitzero != nil && fieldInfo.omitzero(fv) {
+ // omitzero fields should be ignored
+ continue
+ }
if len(fieldInfo.name) == 0 {
// This field is inlined.
if err := toUnstructured(fv, dv); err != nil {
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
index 89feb40103..fd012dbc79 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.runtime
+
// Package runtime includes helper functions for working with API objects
// that follow the kubernetes API object conventions, which are:
//
@@ -48,4 +50,4 @@ limitations under the License.
//
// As a bonus, a few common types useful from all api objects and versions
// are provided in types.go.
-package runtime // import "k8s.io/apimachinery/pkg/runtime"
+package runtime
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
index 9056397fa5..60c000bcb7 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
@@ -18,16 +18,77 @@ package runtime
import (
"bytes"
- "encoding/json"
"errors"
+ "fmt"
+
+ cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
+ "k8s.io/apimachinery/pkg/util/json"
)
+// RawExtension intentionally avoids implementing value.UnstructuredConverter for now because the
+// signature of ToUnstructured does not allow returning an error value in cases where the conversion
+// is not possible (content type is unrecognized or bytes don't match content type).
+func rawToUnstructured(raw []byte, contentType string) (interface{}, error) {
+ switch contentType {
+ case ContentTypeJSON:
+ var u interface{}
+ if err := json.Unmarshal(raw, &u); err != nil {
+ return nil, fmt.Errorf("failed to parse RawExtension bytes as JSON: %w", err)
+ }
+ return u, nil
+ case ContentTypeCBOR:
+ var u interface{}
+ if err := cbor.Unmarshal(raw, &u); err != nil {
+ return nil, fmt.Errorf("failed to parse RawExtension bytes as CBOR: %w", err)
+ }
+ return u, nil
+ default:
+ return nil, fmt.Errorf("cannot convert RawExtension with unrecognized content type to unstructured")
+ }
+}
+
+func (re RawExtension) guessContentType() string {
+ switch {
+ case bytes.HasPrefix(re.Raw, cborSelfDescribed):
+ return ContentTypeCBOR
+ case len(re.Raw) > 0:
+ switch re.Raw[0] {
+ case '\t', '\r', '\n', ' ', '{', '[', 'n', 't', 'f', '"', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ // Prefixes for the four whitespace characters, objects, arrays, strings, numbers, true, false, and null.
+ return ContentTypeJSON
+ }
+ }
+ return ""
+}
+
func (re *RawExtension) UnmarshalJSON(in []byte) error {
if re == nil {
return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer")
}
- if !bytes.Equal(in, []byte("null")) {
- re.Raw = append(re.Raw[0:0], in...)
+ if bytes.Equal(in, []byte("null")) {
+ return nil
+ }
+ re.Raw = append(re.Raw[0:0], in...)
+ return nil
+}
+
+var (
+ cborNull = []byte{0xf6}
+ cborSelfDescribed = []byte{0xd9, 0xd9, 0xf7}
+)
+
+func (re *RawExtension) UnmarshalCBOR(in []byte) error {
+ if re == nil {
+ return errors.New("runtime.RawExtension: UnmarshalCBOR on nil pointer")
+ }
+ if !bytes.Equal(in, cborNull) {
+ if !bytes.HasPrefix(in, cborSelfDescribed) {
+ // The self-described CBOR tag doesn't change the interpretation of the data
+ // item it encloses, but it is useful as a magic number. Its encoding is
+ // also what is used to implement the CBOR RecognizingDecoder.
+ re.Raw = append(re.Raw[:0], cborSelfDescribed...)
+ }
+ re.Raw = append(re.Raw, in...)
}
return nil
}
@@ -46,6 +107,35 @@ func (re RawExtension) MarshalJSON() ([]byte, error) {
}
return []byte("null"), nil
}
- // TODO: Check whether ContentType is actually JSON before returning it.
- return re.Raw, nil
+
+ contentType := re.guessContentType()
+ if contentType == ContentTypeJSON {
+ return re.Raw, nil
+ }
+
+ u, err := rawToUnstructured(re.Raw, contentType)
+ if err != nil {
+ return nil, err
+ }
+ return json.Marshal(u)
+}
+
+func (re RawExtension) MarshalCBOR() ([]byte, error) {
+ if re.Raw == nil {
+ if re.Object != nil {
+ return cbor.Marshal(re.Object)
+ }
+ return cbor.Marshal(nil)
+ }
+
+ contentType := re.guessContentType()
+ if contentType == ContentTypeCBOR {
+ return re.Raw, nil
+ }
+
+ u, err := rawToUnstructured(re.Raw, contentType)
+ if err != nil {
+ return nil, err
+ }
+ return cbor.Marshal(u)
}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
index ec677a7d96..f5e78d4b36 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
@@ -15,7 +15,7 @@ limitations under the License.
*/
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto
+// source: k8s.io/apimachinery/pkg/runtime/generated.proto
package runtime
@@ -23,146 +23,16 @@ import (
fmt "fmt"
io "io"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
-
- proto "github.com/gogo/protobuf/proto"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *RawExtension) Reset() { *m = RawExtension{} }
-func (*RawExtension) ProtoMessage() {}
-func (*RawExtension) Descriptor() ([]byte, []int) {
- return fileDescriptor_9d3c45d7f546725c, []int{0}
-}
-func (m *RawExtension) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RawExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RawExtension) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RawExtension.Merge(m, src)
-}
-func (m *RawExtension) XXX_Size() int {
- return m.Size()
-}
-func (m *RawExtension) XXX_DiscardUnknown() {
- xxx_messageInfo_RawExtension.DiscardUnknown(m)
-}
+func (m *RawExtension) Reset() { *m = RawExtension{} }
-var xxx_messageInfo_RawExtension proto.InternalMessageInfo
+func (m *TypeMeta) Reset() { *m = TypeMeta{} }
-func (m *TypeMeta) Reset() { *m = TypeMeta{} }
-func (*TypeMeta) ProtoMessage() {}
-func (*TypeMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_9d3c45d7f546725c, []int{1}
-}
-func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TypeMeta) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TypeMeta.Merge(m, src)
-}
-func (m *TypeMeta) XXX_Size() int {
- return m.Size()
-}
-func (m *TypeMeta) XXX_DiscardUnknown() {
- xxx_messageInfo_TypeMeta.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TypeMeta proto.InternalMessageInfo
-
-func (m *Unknown) Reset() { *m = Unknown{} }
-func (*Unknown) ProtoMessage() {}
-func (*Unknown) Descriptor() ([]byte, []int) {
- return fileDescriptor_9d3c45d7f546725c, []int{2}
-}
-func (m *Unknown) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Unknown) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Unknown) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Unknown.Merge(m, src)
-}
-func (m *Unknown) XXX_Size() int {
- return m.Size()
-}
-func (m *Unknown) XXX_DiscardUnknown() {
- xxx_messageInfo_Unknown.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Unknown proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension")
- proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta")
- proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_9d3c45d7f546725c)
-}
-
-var fileDescriptor_9d3c45d7f546725c = []byte{
- // 380 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xcf, 0xaa, 0x13, 0x31,
- 0x14, 0xc6, 0x27, 0xb7, 0x85, 0x7b, 0x4d, 0x0b, 0x57, 0xe2, 0xc2, 0xd1, 0x45, 0xe6, 0xd2, 0x95,
- 0x77, 0x61, 0x02, 0x17, 0x04, 0xb7, 0x9d, 0x52, 0x50, 0x44, 0x90, 0xe0, 0x1f, 0x70, 0x65, 0x3a,
- 0x13, 0xa7, 0x61, 0xe8, 0xc9, 0x90, 0x66, 0x1c, 0xbb, 0xf3, 0x11, 0x7c, 0xac, 0x2e, 0xbb, 0xec,
- 0xaa, 0xd8, 0xf1, 0x21, 0xdc, 0x4a, 0xd3, 0xb4, 0x56, 0x5d, 0x74, 0x97, 0x73, 0xbe, 0xef, 0xf7,
- 0x9d, 0x73, 0x20, 0xf8, 0x45, 0xf9, 0x7c, 0xce, 0xb4, 0xe1, 0x65, 0x3d, 0x51, 0x16, 0x94, 0x53,
- 0x73, 0xfe, 0x45, 0x41, 0x6e, 0x2c, 0x0f, 0x82, 0xac, 0xf4, 0x4c, 0x66, 0x53, 0x0d, 0xca, 0x2e,
- 0x78, 0x55, 0x16, 0xdc, 0xd6, 0xe0, 0xf4, 0x4c, 0xf1, 0x42, 0x81, 0xb2, 0xd2, 0xa9, 0x9c, 0x55,
- 0xd6, 0x38, 0x43, 0x92, 0x3d, 0xc0, 0x4e, 0x01, 0x56, 0x95, 0x05, 0x0b, 0xc0, 0xe3, 0xa7, 0x85,
- 0x76, 0xd3, 0x7a, 0xc2, 0x32, 0x33, 0xe3, 0x85, 0x29, 0x0c, 0xf7, 0xdc, 0xa4, 0xfe, 0xec, 0x2b,
- 0x5f, 0xf8, 0xd7, 0x3e, 0x6f, 0x70, 0x8b, 0xfb, 0x42, 0x36, 0xe3, 0xaf, 0x4e, 0xc1, 0x5c, 0x1b,
- 0x20, 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0x37, 0xe8, 0x49, 0x3f, 0xbd, 0x6c, 0x37, 0x49, 0x47,
- 0xc8, 0x46, 0xec, 0x7a, 0x83, 0x4f, 0xf8, 0xea, 0xed, 0xa2, 0x52, 0xaf, 0x95, 0x93, 0xe4, 0x0e,
- 0x63, 0x59, 0xe9, 0xf7, 0xca, 0xee, 0x20, 0xef, 0xbe, 0x97, 0x92, 0xe5, 0x26, 0x89, 0xda, 0x4d,
- 0x82, 0x87, 0x6f, 0x5e, 0x06, 0x45, 0x9c, 0xb8, 0xc8, 0x0d, 0xee, 0x96, 0x1a, 0xf2, 0xf8, 0xc2,
- 0xbb, 0xfb, 0xc1, 0xdd, 0x7d, 0xa5, 0x21, 0x17, 0x5e, 0x19, 0xfc, 0x42, 0xf8, 0xf2, 0x1d, 0x94,
- 0x60, 0x1a, 0x20, 0x1f, 0xf0, 0x95, 0x0b, 0xd3, 0x7c, 0x7e, 0xef, 0xee, 0x96, 0x9d, 0xb9, 0x9d,
- 0x1d, 0xd6, 0x4b, 0xef, 0x87, 0xf0, 0xe3, 0xc2, 0xe2, 0x18, 0x76, 0xb8, 0xf0, 0xe2, 0xff, 0x0b,
- 0xc9, 0x10, 0x5f, 0x67, 0x06, 0x9c, 0x02, 0x37, 0x86, 0xcc, 0xe4, 0x1a, 0x8a, 0xb8, 0xe3, 0x97,
- 0x7d, 0x18, 0xf2, 0xae, 0x47, 0x7f, 0xcb, 0xe2, 0x5f, 0x3f, 0x79, 0x86, 0x7b, 0xa1, 0xb5, 0x1b,
- 0x1d, 0x77, 0x3d, 0xfe, 0x20, 0xe0, 0xbd, 0xd1, 0x1f, 0x49, 0x9c, 0xfa, 0xd2, 0xf1, 0x72, 0x4b,
- 0xa3, 0xd5, 0x96, 0x46, 0xeb, 0x2d, 0x8d, 0xbe, 0xb5, 0x14, 0x2d, 0x5b, 0x8a, 0x56, 0x2d, 0x45,
- 0xeb, 0x96, 0xa2, 0x1f, 0x2d, 0x45, 0xdf, 0x7f, 0xd2, 0xe8, 0x63, 0x72, 0xe6, 0xb7, 0xfc, 0x0e,
- 0x00, 0x00, 0xff, 0xff, 0x1f, 0x32, 0xd5, 0x68, 0x68, 0x02, 0x00, 0x00,
-}
+func (m *Unknown) Reset() { *m = Unknown{} }
func (m *RawExtension) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.protomessage.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.protomessage.pb.go
new file mode 100644
index 0000000000..1716853ff1
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.protomessage.pb.go
@@ -0,0 +1,28 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package runtime
+
+func (*RawExtension) ProtoMessage() {}
+
+func (*TypeMeta) ProtoMessage() {}
+
+func (*Unknown) ProtoMessage() {}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
index f46a24cc6c..395dfdbd02 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
@@ -236,10 +236,14 @@ func (e WithVersionEncoder) Encode(obj Object, stream io.Writer) error {
gvk = preferredGVK
}
}
- kind.SetGroupVersionKind(gvk)
- err = e.Encoder.Encode(obj, stream)
- kind.SetGroupVersionKind(oldGVK)
- return err
+
+ // The gvk only needs to be set if not already as desired.
+ if gvk != oldGVK {
+ kind.SetGroupVersionKind(gvk)
+ defer kind.SetGroupVersionKind(oldGVK)
+ }
+
+ return e.Encoder.Encode(obj, stream)
}
// WithoutVersionDecoder clears the group version kind of a deserialized object.
@@ -280,3 +284,21 @@ func (e *encoderWithAllocator) Encode(obj Object, w io.Writer) error {
func (e *encoderWithAllocator) Identifier() Identifier {
return e.encoder.Identifier()
}
+
+type nondeterministicEncoderToEncoderAdapter struct {
+ NondeterministicEncoder
+}
+
+func (e nondeterministicEncoderToEncoderAdapter) Encode(obj Object, w io.Writer) error {
+ return e.EncodeNondeterministic(obj, w)
+}
+
+// UseNondeterministicEncoding returns an Encoder that encodes objects using the provided Encoder's
+// EncodeNondeterministic method if it implements NondeterministicEncoder, otherwise it returns the
+// provided Encoder as-is.
+func UseNondeterministicEncoding(encoder Encoder) Encoder {
+ if nondeterministic, ok := encoder.(NondeterministicEncoder); ok {
+ return nondeterministicEncoderToEncoderAdapter{nondeterministic}
+ }
+ return encoder
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
index e89ea89391..8456c21d31 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
@@ -69,6 +69,19 @@ type Encoder interface {
Identifier() Identifier
}
+// NondeterministicEncoder is implemented by Encoders that can serialize objects more efficiently in
+// cases where the output does not need to be deterministic.
+type NondeterministicEncoder interface {
+ Encoder
+
+ // EncodeNondeterministic writes an object to the stream. Unlike the Encode method of
+ // Encoder, EncodeNondeterministic does not guarantee that any two invocations will write
+ // the same sequence of bytes to the io.Writer. Any differences will not be significant to a
+ // generic decoder. For example, map entries and struct fields might be encoded in any
+ // order.
+ EncodeNondeterministic(Object, io.Writer) error
+}
+
// MemoryAllocator is responsible for allocating memory.
// By encapsulating memory allocation into its own interface, we can reuse the memory
// across many operations in places we know it can significantly improve the performance.
@@ -246,6 +259,7 @@ type ObjectDefaulter interface {
type ObjectVersioner interface {
ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error)
+ PrioritizedVersionsForGroup(group string) []schema.GroupVersion
}
// ObjectConvertor converts an object to a different version.
@@ -371,3 +385,9 @@ type Unstructured interface {
// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency.
EachListItemWithAlloc(func(Object) error) error
}
+
+// ApplyConfiguration is an interface that root apply configuration types implement.
+type ApplyConfiguration interface {
+ // IsApplyConfiguration is implemented if the object is the root of an apply configuration.
+ IsApplyConfiguration()
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
index 46b1e787bd..ed57e08afe 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
@@ -15,45 +15,6 @@ limitations under the License.
*/
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
+// source: k8s.io/apimachinery/pkg/runtime/schema/generated.proto
package schema
-
-import (
- fmt "fmt"
-
- math "math"
-
- proto "github.com/gogo/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func init() {
- proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_0462724132518e0d)
-}
-
-var fileDescriptor_0462724132518e0d = []byte{
- // 186 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0xce, 0xad, 0x8e, 0xc3, 0x30,
- 0x0c, 0xc0, 0xf1, 0x84, 0x1e, 0x3c, 0x78, 0xc0, 0xb0, 0xec, 0x62, 0x7a, 0xf8, 0xf0, 0xa4, 0xf1,
- 0xb1, 0xb4, 0xf5, 0xd2, 0x28, 0xca, 0x87, 0xd2, 0x64, 0xd2, 0xd8, 0x1e, 0x61, 0x8f, 0x55, 0x58,
- 0x58, 0xb8, 0x66, 0x2f, 0x32, 0x29, 0x2d, 0x18, 0x1c, 0xf3, 0x5f, 0xd6, 0xcf, 0xf2, 0xd7, 0xd1,
- 0xfc, 0x8d, 0x42, 0x7b, 0x34, 0xb9, 0xa5, 0xe8, 0x28, 0xd1, 0x88, 0x17, 0x72, 0xbd, 0x8f, 0xb8,
- 0x2f, 0x64, 0xd0, 0x56, 0x76, 0x83, 0x76, 0x14, 0xaf, 0x18, 0x8c, 0xc2, 0x98, 0x5d, 0xd2, 0x96,
- 0x70, 0xec, 0x06, 0xb2, 0x12, 0x15, 0x39, 0x8a, 0x32, 0x51, 0x2f, 0x42, 0xf4, 0xc9, 0x7f, 0x37,
- 0x9b, 0x13, 0xef, 0x4e, 0x04, 0xa3, 0xc4, 0xee, 0xc4, 0xe6, 0x7e, 0x7e, 0x95, 0x4e, 0x43, 0x6e,
- 0x45, 0xe7, 0x2d, 0x2a, 0xaf, 0x3c, 0x56, 0xde, 0xe6, 0x73, 0xad, 0x1a, 0x75, 0xda, 0xce, 0xfe,
- 0x1f, 0xa6, 0x15, 0xd8, 0xbc, 0x02, 0x5b, 0x56, 0x60, 0xb7, 0x02, 0x7c, 0x2a, 0xc0, 0xe7, 0x02,
- 0x7c, 0x29, 0xc0, 0x1f, 0x05, 0xf8, 0xfd, 0x09, 0xec, 0xd4, 0x7c, 0xf6, 0xf4, 0x2b, 0x00, 0x00,
- 0xff, 0xff, 0x12, 0xb4, 0xae, 0x48, 0xf6, 0x00, 0x00, 0x00,
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.protomessage.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.protomessage.pb.go
new file mode 100644
index 0000000000..047437377f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.protomessage.pb.go
@@ -0,0 +1,22 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package schema
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
index a5b116718d..e2fbeabdd0 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
@@ -17,15 +17,19 @@ limitations under the License.
package runtime
import (
+ "context"
"fmt"
"reflect"
"strings"
+ "k8s.io/apimachinery/pkg/api/operation"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/naming"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/kube-openapi/pkg/util"
)
// Scheme defines methods for serializing and deserializing API objects, a type
@@ -68,6 +72,12 @@ type Scheme struct {
// the provided object must be a pointer.
defaulterFuncs map[reflect.Type]func(interface{})
+ // validationFuncs is a map to funcs to be called with an object to perform validation.
+ // The provided object must be a pointer.
+ // If oldObject is non-nil, update validation is performed and may perform additional
+ // validation such as transition rules and immutability checks.
+ validationFuncs map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}) field.ErrorList
+
// converter stores all registered conversion functions. It also has
// default converting behavior.
converter *conversion.Converter
@@ -96,6 +106,7 @@ func NewScheme() *Scheme {
unversionedKinds: map[string]reflect.Type{},
fieldLabelConversionFuncs: map[schema.GroupVersionKind]FieldLabelConversionFunc{},
defaulterFuncs: map[reflect.Type]func(interface{}){},
+ validationFuncs: map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}) field.ErrorList{},
versionPriority: map[string][]string{},
schemeName: naming.GetNameFromCallsite(internalPackages...),
}
@@ -347,6 +358,35 @@ func (s *Scheme) Default(src Object) {
}
}
+// AddValidationFunc registered a function that can validate the object, and
+// oldObject. These functions will be invoked when Validate() or ValidateUpdate()
+// is called. The function will never be called unless the validated object
+// matches srcType. If this function is invoked twice with the same srcType, the
+// fn passed to the later call will be used instead.
+func (s *Scheme) AddValidationFunc(srcType Object, fn func(ctx context.Context, op operation.Operation, object, oldObject interface{}) field.ErrorList) {
+ s.validationFuncs[reflect.TypeOf(srcType)] = fn
+}
+
+// Validate validates the provided Object according to the generated declarative validation code.
+// WARNING: This does not validate all objects! The handwritten validation code in validation.go
+// is not run when this is called. Only the generated zz_generated.validations.go validation code is run.
+func (s *Scheme) Validate(ctx context.Context, options []string, object Object, subresources ...string) field.ErrorList {
+ if fn, ok := s.validationFuncs[reflect.TypeOf(object)]; ok {
+ return fn(ctx, operation.Operation{Type: operation.Create, Request: operation.Request{Subresources: subresources}, Options: options}, object, nil)
+ }
+ return nil
+}
+
+// ValidateUpdate validates the provided object and oldObject according to the generated declarative validation code.
+// WARNING: This does not validate all objects! The handwritten validation code in validation.go
+// is not run when this is called. Only the generated zz_generated.validations.go validation code is run.
+func (s *Scheme) ValidateUpdate(ctx context.Context, options []string, object, oldObject Object, subresources ...string) field.ErrorList {
+ if fn, ok := s.validationFuncs[reflect.TypeOf(object)]; ok {
+ return fn(ctx, operation.Operation{Type: operation.Update, Request: operation.Request{Subresources: subresources}, Options: options}, object, oldObject)
+ }
+ return nil
+}
+
// Convert will attempt to convert in into out. Both must be pointers. For easy
// testing of conversion functions. Returns an error if the conversion isn't
// possible. You can call this with types that haven't been registered (for example,
@@ -704,3 +744,76 @@ func (s *Scheme) Name() string {
// internalPackages are packages that ignored when creating a default reflector name. These packages are in the common
// call chains to NewReflector, so they'd be low entropy names for reflectors
var internalPackages = []string{"k8s.io/apimachinery/pkg/runtime/scheme.go"}
+
+// ToOpenAPIDefinitionName returns the REST-friendly OpenAPI definition name known type identified by groupVersionKind.
+// If the groupVersionKind does not identify a known type, an error is returned.
+// The Version field of groupVersionKind is required, and the Group and Kind fields are required for unstructured.Unstructured
+// types. If a required field is empty, an error is returned.
+//
+// The OpenAPI definition name is the canonical name of the type, with the group and version removed.
+// For example, the OpenAPI definition name of Pod is `io.k8s.api.core.v1.Pod`.
+//
+// This respects the util.OpenAPIModelNamer interface and will return the name returned by
+// OpenAPIModelName() if it is defined on the type.
+//
+// A known type that is registered as an unstructured.Unstructured type is treated as a custom resource and
+// which has an OpenAPI definition name of the form `.`.
+// For example, the OpenAPI definition name of `group: stable.example.com, version: v1, kind: Pod` is
+// `com.example.stable.v1.Pod`.
+func (s *Scheme) ToOpenAPIDefinitionName(groupVersionKind schema.GroupVersionKind) (string, error) {
+ if groupVersionKind.Version == "" { // Empty version is not allowed by New() so check it first to avoid a panic.
+ return "", fmt.Errorf("version is required on all types: %v", groupVersionKind)
+ }
+ example, err := s.New(groupVersionKind)
+ if err != nil {
+ return "", err
+ }
+
+ // Use a namer if provided
+ if namer, ok := example.(util.OpenAPIModelNamer); ok {
+ return namer.OpenAPIModelName(), nil
+ }
+
+ if _, ok := example.(Unstructured); ok {
+ if groupVersionKind.Group == "" || groupVersionKind.Kind == "" {
+ return "", fmt.Errorf("unable to convert GroupVersionKind with empty fields to unstructured type to an OpenAPI definition name: %v", groupVersionKind)
+ }
+ return reverseParts(groupVersionKind.Group) + "." + groupVersionKind.Version + "." + groupVersionKind.Kind, nil
+ }
+ rtype := reflect.TypeOf(example).Elem()
+ name := toOpenAPIDefinitionName(rtype.PkgPath() + "." + rtype.Name())
+ return name, nil
+}
+
+// toOpenAPIDefinitionName converts Golang package/type canonical name into REST friendly OpenAPI name.
+// Input is expected to be `PkgPath + "." TypeName.
+//
+// Examples of REST friendly OpenAPI name:
+//
+// Input: k8s.io/api/core/v1.Pod
+// Output: io.k8s.api.core.v1.Pod
+//
+// Input: k8s.io/api/core/v1
+// Output: io.k8s.api.core.v1
+//
+// Input: csi.storage.k8s.io/v1alpha1.CSINodeInfo
+// Output: io.k8s.storage.csi.v1alpha1.CSINodeInfo
+//
+// Note that this is a copy of ToRESTFriendlyName from k8s.io/kube-openapi/pkg/util. It is duplicated here to avoid
+// a dependency on kube-openapi.
+func toOpenAPIDefinitionName(name string) string {
+ nameParts := strings.Split(name, "/")
+ // Reverse first part. e.g., io.k8s... instead of k8s.io...
+ if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") {
+ nameParts[0] = reverseParts(nameParts[0])
+ }
+ return strings.Join(nameParts, ".")
+}
+
+func reverseParts(dotSeparatedName string) string {
+ parts := strings.Split(dotSeparatedName, ".")
+ for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
+ parts[i], parts[j] = parts[j], parts[i]
+ }
+ return strings.Join(parts, ".")
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
new file mode 100644
index 0000000000..945dc47c14
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package direct provides functions for marshaling and unmarshaling between arbitrary Go values and
+// CBOR data, with behavior that is compatible with that of the CBOR serializer. In particular,
+// types that implement cbor.Marshaler and cbor.Unmarshaler should use these functions.
+package direct
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes"
+)
+
+// Marshal serializes a value to CBOR. If there is more than one way to encode the value, it will
+// make the same choice as the CBOR implementation of runtime.Serializer.
+func Marshal(src any) ([]byte, error) {
+ return modes.Encode.Marshal(src)
+}
+
+// Unmarshal deserializes from CBOR into an addressable value. If there is more than one way to
+// unmarshal a value, it will make the same choice as the CBOR implementation of runtime.Serializer.
+func Unmarshal(src []byte, dst any) error {
+ return modes.Decode.Unmarshal(src, dst)
+}
+
+// Diagnose accepts well-formed CBOR bytes and returns a string representing the same data item in
+// human-readable diagnostic notation (RFC 8949 Section 8). The diagnostic notation is not meant to
+// be parsed.
+func Diagnose(src []byte) (string, error) {
+ return modes.Diagnostic.Diagnose(src)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go
new file mode 100644
index 0000000000..f14cbd6b58
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modes
+
+import (
+ "bytes"
+ "sync"
+)
+
+var buffers = BufferProvider{p: new(sync.Pool)}
+
+type buffer struct {
+ bytes.Buffer
+}
+
+type pool interface {
+ Get() interface{}
+ Put(interface{})
+}
+
+type BufferProvider struct {
+ p pool
+}
+
+func (b *BufferProvider) Get() *buffer {
+ if buf, ok := b.p.Get().(*buffer); ok {
+ return buf
+ }
+ return &buffer{}
+}
+
+func (b *BufferProvider) Put(buf *buffer) {
+ if buf.Cap() > 3*1024*1024 /* Default MaxRequestBodyBytes */ {
+ // Objects in a sync.Pool are assumed to be fungible. This is not a good assumption
+ // for pools of *bytes.Buffer because a *bytes.Buffer's underlying array grows as
+ // needed to accommodate writes. In Kubernetes, apiservers tend to encode "small"
+ // objects very frequently and much larger objects (especially large lists) only
+ // occasionally. Under steady load, pooled buffers tend to be borrowed frequently
+ // enough to prevent them from being released. Over time, each buffer is used to
+ // encode a large object and its capacity increases accordingly. The result is that
+ // practically all buffers in the pool retain much more capacity than needed to
+ // encode most objects.
+
+ // As a basic mitigation for the worst case, buffers with more capacity than the
+ // default max request body size are never returned to the pool.
+ // TODO: Optimize for higher buffer utilization.
+ return
+ }
+ buf.Reset()
+ b.p.Put(buf)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go
new file mode 100644
index 0000000000..0210132ffa
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modes
+
+import (
+ "reflect"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+var simpleValues *cbor.SimpleValueRegistry = func() *cbor.SimpleValueRegistry {
+ var opts []func(*cbor.SimpleValueRegistry) error
+ for sv := 0; sv <= 255; sv++ {
+ // Reject simple values 0-19, 23, and 32-255. The simple values 24-31 are reserved
+ // and considered ill-formed by the CBOR specification. We only accept false (20),
+ // true (21), and null (22).
+ switch sv {
+ case 20: // false
+ case 21: // true
+ case 22: // null
+ case 24, 25, 26, 27, 28, 29, 30, 31: // reserved
+ default:
+ opts = append(opts, cbor.WithRejectedSimpleValue(cbor.SimpleValue(sv)))
+ }
+ }
+ simpleValues, err := cbor.NewSimpleValueRegistryFromDefaults(opts...)
+ if err != nil {
+ panic(err)
+ }
+ return simpleValues
+}()
+
+// decode is the basis for the Decode mode, with no JSONUnmarshalerTranscoder
+// configured. TranscodeToJSON uses this directly rather than Decode to avoid an initialization
+// cycle between the two. Everything else should use one of the exported DecModes.
+var decode cbor.DecMode = func() cbor.DecMode {
+ decode, err := cbor.DecOptions{
+ // Maps with duplicate keys are well-formed but invalid according to the CBOR spec
+ // and never acceptable. Unlike the JSON serializer, inputs containing duplicate map
+ // keys are rejected outright and not surfaced as a strict decoding error.
+ DupMapKey: cbor.DupMapKeyEnforcedAPF,
+
+ // For JSON parity, decoding an RFC3339 string into time.Time needs to be accepted
+ // with or without tagging. If a tag number is present, it must be valid.
+ TimeTag: cbor.DecTagOptional,
+
+ // Observed depth up to 16 in fuzzed batch/v1 CronJobList. JSON implementation limit
+ // is 10000.
+ MaxNestedLevels: 64,
+
+ MaxArrayElements: 1024,
+ MaxMapPairs: 1024,
+
+ // Indefinite-length sequences aren't produced by this serializer, but other
+ // implementations can.
+ IndefLength: cbor.IndefLengthAllowed,
+
+ // Accept inputs that contain CBOR tags.
+ TagsMd: cbor.TagsAllowed,
+
+ // Decode type 0 (unsigned integer) as int64.
+ // TODO: IntDecConvertSignedOrFail errors on overflow, JSON will try to fall back to float64.
+ IntDec: cbor.IntDecConvertSignedOrFail,
+
+ // Disable producing map[cbor.ByteString]interface{}, which is not acceptable for
+ // decodes into interface{}.
+ MapKeyByteString: cbor.MapKeyByteStringForbidden,
+
+ // Error on map keys that don't map to a field in the destination struct.
+ ExtraReturnErrors: cbor.ExtraDecErrorUnknownField,
+
+ // Decode maps into concrete type map[string]interface{} when the destination is an
+ // interface{}.
+ DefaultMapType: reflect.TypeOf(map[string]interface{}(nil)),
+
+ // A CBOR text string whose content is not a valid UTF-8 sequence is well-formed but
+ // invalid according to the CBOR spec. Reject invalid inputs. Encoders are
+ // responsible for ensuring that all text strings they produce contain valid UTF-8
+ // sequences and may use the byte string major type to encode strings that have not
+ // been validated.
+ UTF8: cbor.UTF8RejectInvalid,
+
+ // Never make a case-insensitive match between a map key and a struct field.
+ FieldNameMatching: cbor.FieldNameMatchingCaseSensitive,
+
+ // Produce string concrete values when decoding a CBOR byte string into interface{}.
+ DefaultByteStringType: reflect.TypeOf(""),
+
+ // Allow CBOR byte strings to be decoded into string destination values. If a byte
+ // string is enclosed in an "expected later encoding" tag
+ // (https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), then the text
+ // encoding indicated by that tag (e.g. base64) will be applied to the contents of
+ // the byte string.
+ ByteStringToString: cbor.ByteStringToStringAllowedWithExpectedLaterEncoding,
+
+ // Allow CBOR byte strings to match struct fields when appearing as a map key.
+ FieldNameByteString: cbor.FieldNameByteStringAllowed,
+
+ // When decoding an unrecognized tag to interface{}, return the decoded tag content
+ // instead of the default, a cbor.Tag representing a (number, content) pair.
+ UnrecognizedTagToAny: cbor.UnrecognizedTagContentToAny,
+
+ // Decode time tags to interface{} as strings containing RFC 3339 timestamps.
+ TimeTagToAny: cbor.TimeTagToRFC3339Nano,
+
+ // For parity with JSON, strings can be decoded into time.Time if they are RFC 3339
+ // timestamps.
+ ByteStringToTime: cbor.ByteStringToTimeAllowed,
+
+ // Reject NaN and infinite floating-point values since they don't have a JSON
+ // representation (RFC 8259 Section 6).
+ NaN: cbor.NaNDecodeForbidden,
+ Inf: cbor.InfDecodeForbidden,
+
+ // When unmarshaling a byte string into a []byte, assume that the byte string
+ // contains base64-encoded bytes, unless explicitly counterindicated by an "expected
+ // later encoding" tag. This is consistent with the because of unmarshaling a JSON
+ // text into a []byte.
+ ByteStringExpectedFormat: cbor.ByteStringExpectedBase64,
+
+ // Reject the arbitrary-precision integer tags because they can't be faithfully
+ // roundtripped through the allowable Unstructured types.
+ BignumTag: cbor.BignumTagForbidden,
+
+ // Reject anything other than the simple values true, false, and null.
+ SimpleValues: simpleValues,
+
+ // Disable default recognition of types implementing encoding.BinaryUnmarshaler,
+ // which is not recognized for JSON decoding.
+ BinaryUnmarshaler: cbor.BinaryUnmarshalerNone,
+
+ // Marshal types that implement encoding.TextMarshaler by calling their MarshalText
+ // method and encoding the result to a CBOR text string.
+ TextUnmarshaler: cbor.TextUnmarshalerTextString,
+ }.DecMode()
+ if err != nil {
+ panic(err)
+ }
+ return decode
+}()
+
+var Decode cbor.DecMode = func() cbor.DecMode {
+ opts := decode.DecOptions()
+ // When decoding into a value of a type that implements json.Unmarshaler (and does not
+ // implement cbor.Unmarshaler), transcode the input to JSON and pass it to the value's
+ // UnmarshalJSON method.
+ opts.JSONUnmarshalerTranscoder = TranscodeFunc(TranscodeToJSON)
+ dm, err := opts.DecMode()
+ if err != nil {
+ panic(err)
+ }
+ return dm
+}()
+
+// DecodeLax is derived from Decode, but does not complain about unknown fields in the input.
+var DecodeLax cbor.DecMode = func() cbor.DecMode {
+ opts := Decode.DecOptions()
+ opts.ExtraReturnErrors &^= cbor.ExtraDecErrorUnknownField // clear bit
+ dm, err := opts.DecMode()
+ if err != nil {
+ panic(err)
+ }
+ return dm
+}()
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go
new file mode 100644
index 0000000000..61f3f145f5
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modes
+
+import (
+ "github.com/fxamacker/cbor/v2"
+)
+
+var Diagnostic cbor.DiagMode = func() cbor.DiagMode {
+ opts := Decode.DecOptions()
+ diagnostic, err := cbor.DiagOptions{
+ ByteStringText: true,
+
+ MaxNestedLevels: opts.MaxNestedLevels,
+ MaxArrayElements: opts.MaxArrayElements,
+ MaxMapPairs: opts.MaxMapPairs,
+ }.DiagMode()
+ if err != nil {
+ panic(err)
+ }
+ return diagnostic
+}()
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
new file mode 100644
index 0000000000..815dbe6660
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
@@ -0,0 +1,177 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modes
+
+import (
+ "io"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+// encode is the basis for the Encode mode, with no JSONMarshalerTranscoder
+// configured. TranscodeFromJSON uses this directly rather than Encode to avoid an initialization
+// cycle between the two. Everything else should use one of the exported EncModes.
+var encode = EncMode{
+ delegate: func() cbor.UserBufferEncMode {
+ encode, err := cbor.EncOptions{
+ // Map keys need to be sorted to have deterministic output, and this is the order
+ // defined in RFC 8949 4.2.1 "Core Deterministic Encoding Requirements".
+ Sort: cbor.SortBytewiseLexical,
+
+ // CBOR supports distinct types for IEEE-754 float16, float32, and float64. Store
+ // floats in the smallest width that preserves value so that equivalent float32 and
+ // float64 values encode to identical bytes, as they do in a JSON
+ // encoding. Satisfies one of the "Core Deterministic Encoding Requirements".
+ ShortestFloat: cbor.ShortestFloat16,
+
+ // Error on attempt to encode NaN and infinite values. This is what the JSON
+ // serializer does.
+ NaNConvert: cbor.NaNConvertReject,
+ InfConvert: cbor.InfConvertReject,
+
+ // Error on attempt to encode math/big.Int values, which can't be faithfully
+ // roundtripped through Unstructured in general (the dynamic numeric types allowed
+ // in Unstructured are limited to float64 and int64).
+ BigIntConvert: cbor.BigIntConvertReject,
+
+ // MarshalJSON for time.Time writes RFC3339 with nanos.
+ Time: cbor.TimeRFC3339Nano,
+
+ // The decoder must be able to accept RFC3339 strings with or without tag 0 (e.g. by
+ // the end of time.Time -> JSON -> Unstructured -> CBOR, the CBOR encoder has no
+ // reliable way of knowing that a particular string originated from serializing a
+ // time.Time), so producing tag 0 has little use.
+ TimeTag: cbor.EncTagNone,
+
+ // Indefinite-length items have multiple encodings and aren't being used anyway, so
+ // disable to avoid an opportunity for nondeterminism.
+ IndefLength: cbor.IndefLengthForbidden,
+
+ // Preserve distinction between nil and empty for slices and maps.
+ NilContainers: cbor.NilContainerAsNull,
+
+ // OK to produce tags.
+ TagsMd: cbor.TagsAllowed,
+
+ // Use the same definition of "empty" as encoding/json.
+ OmitEmpty: cbor.OmitEmptyGoValue,
+
+ // The CBOR types text string and byte string are structurally equivalent, with the
+ // semantic difference that a text string whose content is an invalid UTF-8 sequence
+ // is itself invalid. We reject all invalid text strings at decode time and do not
+ // validate or sanitize all Go strings at encode time. Encoding Go strings to the
+ // byte string type is comparable to the existing Protobuf behavior and cheaply
+ // ensures that the output is valid CBOR.
+ String: cbor.StringToByteString,
+
+ // Encode struct field names to the byte string type rather than the text string
+ // type.
+ FieldName: cbor.FieldNameToByteString,
+
+ // Marshal Go byte arrays to CBOR arrays of integers (as in JSON) instead of byte
+ // strings.
+ ByteArray: cbor.ByteArrayToArray,
+
+ // Marshal []byte to CBOR byte string enclosed in tag 22 (expected later base64
+ // encoding, https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), to
+ // interoperate with the existing JSON behavior. This indicates to the decoder that,
+ // when decoding into a string (or unstructured), the resulting value should be the
+ // base64 encoding of the original bytes. No base64 encoding or decoding needs to be
+ // performed for []byte-to-CBOR-to-[]byte roundtrips.
+ ByteSliceLaterFormat: cbor.ByteSliceLaterFormatBase64,
+
+ // Disable default recognition of types implementing encoding.BinaryMarshaler, which
+ // is not recognized for JSON encoding.
+ BinaryMarshaler: cbor.BinaryMarshalerNone,
+
+ // Unmarshal into types that implement encoding.TextUnmarshaler by passing
+ // the contents of a CBOR string to their UnmarshalText method.
+ TextMarshaler: cbor.TextMarshalerTextString,
+ }.UserBufferEncMode()
+ if err != nil {
+ panic(err)
+ }
+ return encode
+ }(),
+}
+
+var Encode = EncMode{
+ delegate: func() cbor.UserBufferEncMode {
+ opts := encode.options()
+ // To encode a value of a type that implements json.Marshaler (and does not
+ // implement cbor.Marshaler), transcode the result of calling its MarshalJSON method
+ // directly to CBOR.
+ opts.JSONMarshalerTranscoder = TranscodeFunc(TranscodeFromJSON)
+ em, err := opts.UserBufferEncMode()
+ if err != nil {
+ panic(err)
+ }
+ return em
+ }(),
+}
+
+var EncodeNondeterministic = EncMode{
+ delegate: func() cbor.UserBufferEncMode {
+ opts := Encode.options()
+ opts.Sort = cbor.SortFastShuffle
+ em, err := opts.UserBufferEncMode()
+ if err != nil {
+ panic(err)
+ }
+ return em
+ }(),
+}
+
+type EncMode struct {
+ delegate cbor.UserBufferEncMode
+}
+
+func (em EncMode) options() cbor.EncOptions {
+ return em.delegate.EncOptions()
+}
+
+func (em EncMode) MarshalTo(v interface{}, w io.Writer) error {
+ if buf, ok := w.(*buffer); ok {
+ return em.delegate.MarshalToBuffer(v, &buf.Buffer)
+ }
+
+ buf := buffers.Get()
+ defer buffers.Put(buf)
+ if err := em.delegate.MarshalToBuffer(v, &buf.Buffer); err != nil {
+ return err
+ }
+
+ if _, err := io.Copy(w, buf); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (em EncMode) Marshal(v interface{}) ([]byte, error) {
+ buf := buffers.Get()
+ defer buffers.Put(buf)
+
+ if err := em.MarshalTo(v, &buf.Buffer); err != nil {
+ return nil, err
+ }
+
+ clone := make([]byte, buf.Len())
+ copy(clone, buf.Bytes())
+
+ return clone, nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/transcoding.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/transcoding.go
new file mode 100644
index 0000000000..5620e9ccc9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/transcoding.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modes
+
+import (
+ "encoding/json"
+ "errors"
+ "io"
+
+ kjson "sigs.k8s.io/json"
+)
+
+type TranscodeFunc func(dst io.Writer, src io.Reader) error
+
+func (f TranscodeFunc) Transcode(dst io.Writer, src io.Reader) error {
+ return f(dst, src)
+}
+
+func TranscodeFromJSON(dst io.Writer, src io.Reader) error {
+ var tmp any
+ dec := kjson.NewDecoderCaseSensitivePreserveInts(src)
+ if err := dec.Decode(&tmp); err != nil {
+ return err
+ }
+ if err := dec.Decode(&struct{}{}); !errors.Is(err, io.EOF) {
+ return errors.New("extraneous data")
+ }
+
+ return encode.MarshalTo(tmp, dst)
+}
+
+func TranscodeToJSON(dst io.Writer, src io.Reader) error {
+ var tmp any
+ dec := decode.NewDecoder(src)
+ if err := dec.Decode(&tmp); err != nil {
+ return err
+ }
+ if err := dec.Decode(&struct{}{}); !errors.Is(err, io.EOF) {
+ return errors.New("extraneous data")
+ }
+
+ // Use an Encoder to avoid the extra []byte allocated by Marshal. Encode, unlike Marshal,
+ // appends a trailing newline to separate consecutive encodings of JSON values that aren't
+ // self-delimiting, like numbers. Strip the newline to avoid the assumption that every
+ // json.Unmarshaler implementation will accept trailing whitespace.
+ enc := json.NewEncoder(&trailingLinefeedSuppressor{delegate: dst})
+ enc.SetIndent("", "")
+ return enc.Encode(tmp)
+}
+
+// trailingLinefeedSuppressor is an io.Writer that wraps another io.Writer, suppressing a single
+// trailing linefeed if it is the last byte written by the latest call to Write.
+type trailingLinefeedSuppressor struct {
+ lf bool
+ delegate io.Writer
+}
+
+func (w *trailingLinefeedSuppressor) Write(p []byte) (int, error) {
+ if len(p) == 0 {
+ // Avoid flushing a buffered linefeeds on an empty write.
+ return 0, nil
+ }
+
+ if w.lf {
+ // The previous write had a trailing linefeed that was buffered. That wasn't the
+ // last Write call, so flush the buffered linefeed before continuing.
+ n, err := w.delegate.Write([]byte{'\n'})
+ if n > 0 {
+ w.lf = false
+ }
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ if p[len(p)-1] != '\n' {
+ return w.delegate.Write(p)
+ }
+
+ p = p[:len(p)-1]
+
+ if len(p) == 0 { // []byte{'\n'}
+ w.lf = true
+ return 1, nil
+ }
+
+ n, err := w.delegate.Write(p)
+ if n == len(p) {
+ // Everything up to the trailing linefeed has been flushed. Eat the linefeed.
+ w.lf = true
+ n++
+ }
+ return n, err
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
index ff98208420..81286fccb4 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
@@ -17,9 +17,6 @@ limitations under the License.
package serializer
import (
- "mime"
- "strings"
-
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
@@ -28,41 +25,26 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
)
-// serializerExtensions are for serializers that are conditionally compiled in
-var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){}
-
-type serializerType struct {
- AcceptContentTypes []string
- ContentType string
- FileExtensions []string
- // EncodesAsText should be true if this content type can be represented safely in UTF-8
- EncodesAsText bool
-
- Serializer runtime.Serializer
- PrettySerializer runtime.Serializer
- StrictSerializer runtime.Serializer
-
- AcceptStreamContentTypes []string
- StreamContentType string
-
- Framer runtime.Framer
- StreamSerializer runtime.Serializer
-}
-
-func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType {
+func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []runtime.SerializerInfo {
jsonSerializer := json.NewSerializerWithOptions(
mf, scheme, scheme,
- json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict},
+ json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict, StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToJSON},
)
- jsonSerializerType := serializerType{
- AcceptContentTypes: []string{runtime.ContentTypeJSON},
- ContentType: runtime.ContentTypeJSON,
- FileExtensions: []string{"json"},
- EncodesAsText: true,
- Serializer: jsonSerializer,
-
- Framer: json.Framer,
- StreamSerializer: jsonSerializer,
+ jsonSerializerType := runtime.SerializerInfo{
+ MediaType: runtime.ContentTypeJSON,
+ MediaTypeType: "application",
+ MediaTypeSubType: "json",
+ EncodesAsText: true,
+ Serializer: jsonSerializer,
+ StrictSerializer: json.NewSerializerWithOptions(
+ mf, scheme, scheme,
+ json.SerializerOptions{Yaml: false, Pretty: false, Strict: true, StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToJSON},
+ ),
+ StreamSerializer: &runtime.StreamSerializerInfo{
+ EncodesAsText: true,
+ Serializer: jsonSerializer,
+ Framer: json.Framer,
+ },
}
if options.Pretty {
jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions(
@@ -71,12 +53,6 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option
)
}
- strictJSONSerializer := json.NewSerializerWithOptions(
- mf, scheme, scheme,
- json.SerializerOptions{Yaml: false, Pretty: false, Strict: true},
- )
- jsonSerializerType.StrictSerializer = strictJSONSerializer
-
yamlSerializer := json.NewSerializerWithOptions(
mf, scheme, scheme,
json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict},
@@ -85,38 +61,40 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option
mf, scheme, scheme,
json.SerializerOptions{Yaml: true, Pretty: false, Strict: true},
)
- protoSerializer := protobuf.NewSerializer(scheme, scheme)
+ protoSerializer := protobuf.NewSerializerWithOptions(scheme, scheme, protobuf.SerializerOptions{
+ StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToProtobuf,
+ })
protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme)
- serializers := []serializerType{
+ serializers := []runtime.SerializerInfo{
jsonSerializerType,
{
- AcceptContentTypes: []string{runtime.ContentTypeYAML},
- ContentType: runtime.ContentTypeYAML,
- FileExtensions: []string{"yaml"},
- EncodesAsText: true,
- Serializer: yamlSerializer,
- StrictSerializer: strictYAMLSerializer,
+ MediaType: runtime.ContentTypeYAML,
+ MediaTypeType: "application",
+ MediaTypeSubType: "yaml",
+ EncodesAsText: true,
+ Serializer: yamlSerializer,
+ StrictSerializer: strictYAMLSerializer,
},
{
- AcceptContentTypes: []string{runtime.ContentTypeProtobuf},
- ContentType: runtime.ContentTypeProtobuf,
- FileExtensions: []string{"pb"},
- Serializer: protoSerializer,
+ MediaType: runtime.ContentTypeProtobuf,
+ MediaTypeType: "application",
+ MediaTypeSubType: "vnd.kubernetes.protobuf",
+ Serializer: protoSerializer,
// note, strict decoding is unsupported for protobuf,
// fall back to regular serializing
StrictSerializer: protoSerializer,
-
- Framer: protobuf.LengthDelimitedFramer,
- StreamSerializer: protoRawSerializer,
+ StreamSerializer: &runtime.StreamSerializerInfo{
+ Serializer: protoRawSerializer,
+ Framer: protobuf.LengthDelimitedFramer,
+ },
},
}
- for _, fn := range serializerExtensions {
- if serializer, ok := fn(scheme); ok {
- serializers = append(serializers, serializer)
- }
+ for _, f := range options.serializers {
+ serializers = append(serializers, f(scheme, scheme))
}
+
return serializers
}
@@ -136,6 +114,11 @@ type CodecFactoryOptions struct {
Strict bool
// Pretty includes a pretty serializer along with the non-pretty one
Pretty bool
+
+ StreamingCollectionsEncodingToJSON bool
+ StreamingCollectionsEncodingToProtobuf bool
+
+ serializers []func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo
}
// CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it.
@@ -162,6 +145,25 @@ func DisableStrict(options *CodecFactoryOptions) {
options.Strict = false
}
+// WithSerializer configures a serializer to be supported in addition to the default serializers.
+func WithSerializer(f func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo) CodecFactoryOptionsMutator {
+ return func(options *CodecFactoryOptions) {
+ options.serializers = append(options.serializers, f)
+ }
+}
+
+func WithStreamingCollectionEncodingToJSON() CodecFactoryOptionsMutator {
+ return func(options *CodecFactoryOptions) {
+ options.StreamingCollectionsEncodingToJSON = true
+ }
+}
+
+func WithStreamingCollectionEncodingToProtobuf() CodecFactoryOptionsMutator {
+ return func(options *CodecFactoryOptions) {
+ options.StreamingCollectionsEncodingToProtobuf = true
+ }
+}
+
// NewCodecFactory provides methods for retrieving serializers for the supported wire formats
// and conversion wrappers to define preferred internal and external versions. In the future,
// as the internal version is used less, callers may instead use a defaulting serializer and
@@ -184,7 +186,7 @@ func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMuta
}
// newCodecFactory is a helper for testing that allows a different metafactory to be specified.
-func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory {
+func newCodecFactory(scheme *runtime.Scheme, serializers []runtime.SerializerInfo) CodecFactory {
decoders := make([]runtime.Decoder, 0, len(serializers))
var accepts []runtime.SerializerInfo
alreadyAccepted := make(map[string]struct{})
@@ -192,38 +194,20 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec
var legacySerializer runtime.Serializer
for _, d := range serializers {
decoders = append(decoders, d.Serializer)
- for _, mediaType := range d.AcceptContentTypes {
- if _, ok := alreadyAccepted[mediaType]; ok {
- continue
- }
- alreadyAccepted[mediaType] = struct{}{}
- info := runtime.SerializerInfo{
- MediaType: d.ContentType,
- EncodesAsText: d.EncodesAsText,
- Serializer: d.Serializer,
- PrettySerializer: d.PrettySerializer,
- StrictSerializer: d.StrictSerializer,
- }
-
- mediaType, _, err := mime.ParseMediaType(info.MediaType)
- if err != nil {
- panic(err)
- }
- parts := strings.SplitN(mediaType, "/", 2)
- info.MediaTypeType = parts[0]
- info.MediaTypeSubType = parts[1]
-
- if d.StreamSerializer != nil {
- info.StreamSerializer = &runtime.StreamSerializerInfo{
- Serializer: d.StreamSerializer,
- EncodesAsText: d.EncodesAsText,
- Framer: d.Framer,
- }
- }
- accepts = append(accepts, info)
- if mediaType == runtime.ContentTypeJSON {
- legacySerializer = d.Serializer
- }
+ if _, ok := alreadyAccepted[d.MediaType]; ok {
+ continue
+ }
+ alreadyAccepted[d.MediaType] = struct{}{}
+
+ acceptedSerializerShallowCopy := d
+ if d.StreamSerializer != nil {
+ cloned := *d.StreamSerializer
+ acceptedSerializerShallowCopy.StreamSerializer = &cloned
+ }
+ accepts = append(accepts, acceptedSerializerShallowCopy)
+
+ if d.MediaType == runtime.ContentTypeJSON {
+ legacySerializer = d.Serializer
}
}
if legacySerializer == nil {
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go
new file mode 100644
index 0000000000..075163ddd8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go
@@ -0,0 +1,230 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "maps"
+ "slices"
+ "sort"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/conversion"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func streamEncodeCollections(obj runtime.Object, w io.Writer) (bool, error) {
+ list, ok := obj.(*unstructured.UnstructuredList)
+ if ok {
+ return true, streamingEncodeUnstructuredList(w, list)
+ }
+ if _, ok := obj.(json.Marshaler); ok {
+ return false, nil
+ }
+ typeMeta, listMeta, items, err := getListMeta(obj)
+ if err == nil {
+ return true, streamingEncodeList(w, typeMeta, listMeta, items)
+ }
+ return false, nil
+}
+
+// getListMeta implements list extraction logic for json stream serialization.
+//
+// Reason for a custom logic instead of reusing accessors from meta package:
+// * Validate json tags to prevent incompatibility with json standard package.
+// * ListMetaAccessor doesn't distinguish empty from nil value.
+// * TypeAccessor reparsing "apiVersion" and serializing it with "{group}/{version}"
+func getListMeta(list runtime.Object) (metav1.TypeMeta, metav1.ListMeta, []runtime.Object, error) {
+ listValue, err := conversion.EnforcePtr(list)
+ if err != nil {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, err
+ }
+ listType := listValue.Type()
+ if listType.NumField() != 3 {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected ListType to have 3 fields")
+ }
+ // TypeMeta
+ typeMeta, ok := listValue.Field(0).Interface().(metav1.TypeMeta)
+ if !ok {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected TypeMeta field to have TypeMeta type")
+ }
+ if listType.Field(0).Tag.Get("json") != ",inline" {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected TypeMeta json field tag to be ",inline"`)
+ }
+ // ListMeta
+ listMeta, ok := listValue.Field(1).Interface().(metav1.ListMeta)
+ if !ok {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected ListMeta field to have ListMeta type")
+ }
+ if listType.Field(1).Tag.Get("json") != "metadata,omitempty" {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected ListMeta json field tag to be "metadata,omitempty"`)
+ }
+ // Items
+ items, err := meta.ExtractList(list)
+ if err != nil {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, err
+ }
+ if listType.Field(2).Tag.Get("json") != "items" {
+ return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected Items json field tag to be "items"`)
+ }
+ return typeMeta, listMeta, items, nil
+}
+
+func streamingEncodeList(w io.Writer, typeMeta metav1.TypeMeta, listMeta metav1.ListMeta, items []runtime.Object) error {
+ // Start
+ if _, err := w.Write([]byte(`{`)); err != nil {
+ return err
+ }
+
+ // TypeMeta
+ if typeMeta.Kind != "" {
+ if err := encodeKeyValuePair(w, "kind", typeMeta.Kind, []byte(",")); err != nil {
+ return err
+ }
+ }
+ if typeMeta.APIVersion != "" {
+ if err := encodeKeyValuePair(w, "apiVersion", typeMeta.APIVersion, []byte(",")); err != nil {
+ return err
+ }
+ }
+
+ // ListMeta
+ if err := encodeKeyValuePair(w, "metadata", listMeta, []byte(",")); err != nil {
+ return err
+ }
+
+ // Items
+ if err := encodeItemsObjectSlice(w, items); err != nil {
+ return err
+ }
+
+ // End
+ _, err := w.Write([]byte("}\n"))
+ return err
+}
+
+func encodeItemsObjectSlice(w io.Writer, items []runtime.Object) (err error) {
+ if items == nil {
+ err := encodeKeyValuePair(w, "items", nil, nil)
+ return err
+ }
+ _, err = w.Write([]byte(`"items":[`))
+ if err != nil {
+ return err
+ }
+ suffix := []byte(",")
+ for i, item := range items {
+ if i == len(items)-1 {
+ suffix = nil
+ }
+ err := encodeValue(w, item, suffix)
+ if err != nil {
+ return err
+ }
+ }
+ _, err = w.Write([]byte("]"))
+ if err != nil {
+ return err
+ }
+ return err
+}
+
+func streamingEncodeUnstructuredList(w io.Writer, list *unstructured.UnstructuredList) error {
+ _, err := w.Write([]byte(`{`))
+ if err != nil {
+ return err
+ }
+ keys := slices.Collect(maps.Keys(list.Object))
+ if _, exists := list.Object["items"]; !exists {
+ keys = append(keys, "items")
+ }
+ sort.Strings(keys)
+
+ suffix := []byte(",")
+ for i, key := range keys {
+ if i == len(keys)-1 {
+ suffix = nil
+ }
+ if key == "items" {
+ err = encodeItemsUnstructuredSlice(w, list.Items, suffix)
+ } else {
+ err = encodeKeyValuePair(w, key, list.Object[key], suffix)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ _, err = w.Write([]byte("}\n"))
+ return err
+}
+
+func encodeItemsUnstructuredSlice(w io.Writer, items []unstructured.Unstructured, suffix []byte) (err error) {
+ _, err = w.Write([]byte(`"items":[`))
+ if err != nil {
+ return err
+ }
+ comma := []byte(",")
+ for i, item := range items {
+ if i == len(items)-1 {
+ comma = nil
+ }
+ err := encodeValue(w, item.Object, comma)
+ if err != nil {
+ return err
+ }
+ }
+ _, err = w.Write([]byte("]"))
+ if err != nil {
+ return err
+ }
+ if len(suffix) > 0 {
+ _, err = w.Write(suffix)
+ }
+ return err
+}
+
+func encodeKeyValuePair(w io.Writer, key string, value any, suffix []byte) (err error) {
+ err = encodeValue(w, key, []byte(":"))
+ if err != nil {
+ return err
+ }
+ err = encodeValue(w, value, suffix)
+ if err != nil {
+ return err
+ }
+ return err
+}
+
+func encodeValue(w io.Writer, value any, suffix []byte) error {
+ data, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ if err != nil {
+ return err
+ }
+ if len(suffix) > 0 {
+ _, err = w.Write(suffix)
+ }
+ return err
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
index 1ae4a32eb7..24f66a1017 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
@@ -36,7 +36,7 @@ import (
// is not nil, the object has the group, version, and kind fields set.
// Deprecated: use NewSerializerWithOptions instead.
func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer {
- return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false})
+ return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false, false})
}
// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer
@@ -44,7 +44,7 @@ func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtim
// matches JSON, and will error if constructs are used that do not serialize to JSON.
// Deprecated: use NewSerializerWithOptions instead.
func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
- return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false})
+ return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false, false})
}
// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML
@@ -93,6 +93,9 @@ type SerializerOptions struct {
// Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML.
// Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths.
Strict bool
+
+	// StreamingCollectionsEncoding enables encoding collections one item at a time, drastically reducing the memory needed.
+ StreamingCollectionsEncoding bool
}
// Serializer handles encoding versioned objects into the proper JSON form
@@ -242,6 +245,15 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
_, err = w.Write(data)
return err
}
+ if s.options.StreamingCollectionsEncoding {
+ ok, err := streamEncodeCollections(obj, w)
+ if err != nil {
+ return err
+ }
+ if ok {
+ return nil
+ }
+ }
encoder := json.NewEncoder(w)
return encoder.Encode(obj)
}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go
new file mode 100644
index 0000000000..afac03e9da
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go
@@ -0,0 +1,176 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package protobuf
+
+import (
+ "errors"
+ "io"
+ "math/bits"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/conversion"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+var (
+ errFieldCount = errors.New("expected ListType to have 3 fields")
+ errTypeMetaField = errors.New("expected TypeMeta field to have TypeMeta type")
+ errTypeMetaProtobufTag = errors.New(`expected TypeMeta protobuf field tag to be ""`)
+ errListMetaField = errors.New("expected ListMeta field to have ListMeta type")
+ errListMetaProtobufTag = errors.New(`expected ListMeta protobuf field tag to be "bytes,1,opt,name=metadata"`)
+ errItemsProtobufTag = errors.New(`expected Items protobuf field tag to be "bytes,2,rep,name=items"`)
+ errItemsSizer = errors.New(`expected Items elements to implement proto.Sizer`)
+)
+
+// getStreamingListData implements list extraction logic for protobuf stream serialization.
+//
+// Reason for a custom logic instead of reusing accessors from meta package:
+// * Validate proto tags to prevent incompatibility with proto standard package.
+// * ListMetaAccessor doesn't distinguish empty from nil value.
+// * TypeAccessor reparsing "apiVersion" and serializing it with "{group}/{version}"
+func getStreamingListData(list runtime.Object) (data streamingListData, err error) {
+ listValue, err := conversion.EnforcePtr(list)
+ if err != nil {
+ return data, err
+ }
+ listType := listValue.Type()
+ if listType.NumField() != 3 {
+ return data, errFieldCount
+ }
+ // TypeMeta: validated, but not returned as is not serialized.
+ _, ok := listValue.Field(0).Interface().(metav1.TypeMeta)
+ if !ok {
+ return data, errTypeMetaField
+ }
+ if listType.Field(0).Tag.Get("protobuf") != "" {
+ return data, errTypeMetaProtobufTag
+ }
+ // ListMeta
+ listMeta, ok := listValue.Field(1).Interface().(metav1.ListMeta)
+ if !ok {
+ return data, errListMetaField
+ }
+ // if we were ever to relax the protobuf tag check we should update the hardcoded `0xa` below when writing ListMeta.
+ if listType.Field(1).Tag.Get("protobuf") != "bytes,1,opt,name=metadata" {
+ return data, errListMetaProtobufTag
+ }
+ data.listMeta = listMeta
+ // Items; if we were ever to relax the protobuf tag check we should update the hardcoded `0x12` below when writing Items.
+ if listType.Field(2).Tag.Get("protobuf") != "bytes,2,rep,name=items" {
+ return data, errItemsProtobufTag
+ }
+ items, err := meta.ExtractList(list)
+ if err != nil {
+ return data, err
+ }
+ data.items = items
+ data.totalSize, data.listMetaSize, data.itemsSizes, err = listSize(listMeta, items)
+ return data, err
+}
+
+type streamingListData struct {
+ // totalSize is the total size of the serialized List object, including their proto headers/size bytes
+ totalSize int
+
+ // listMetaSize caches results from .Size() call to listMeta, doesn't include header bytes (field identifier, size)
+ listMetaSize int
+ listMeta metav1.ListMeta
+
+ // itemsSizes caches results from .Size() call to items, doesn't include header bytes (field identifier, size)
+ itemsSizes []int
+ items []runtime.Object
+}
+
+type sizer interface {
+ Size() int
+}
+
+// listSize return size of ListMeta and items to be later used for preallocations.
+// listMetaSize and itemSizes do not include header bytes (field identifier, size).
+func listSize(listMeta metav1.ListMeta, items []runtime.Object) (totalSize, listMetaSize int, itemSizes []int, err error) {
+ // ListMeta
+ listMetaSize = listMeta.Size()
+ totalSize += 1 + sovGenerated(uint64(listMetaSize)) + listMetaSize
+ // Items
+ itemSizes = make([]int, len(items))
+ for i, item := range items {
+ sizer, ok := item.(sizer)
+ if !ok {
+ return totalSize, listMetaSize, nil, errItemsSizer
+ }
+ n := sizer.Size()
+ itemSizes[i] = n
+ totalSize += 1 + sovGenerated(uint64(n)) + n
+ }
+ return totalSize, listMetaSize, itemSizes, nil
+}
+
+func streamingEncodeUnknownList(w io.Writer, unk runtime.Unknown, listData streamingListData, memAlloc runtime.MemoryAllocator) error {
+ _, err := w.Write(protoEncodingPrefix)
+ if err != nil {
+ return err
+ }
+ // encodeList is responsible for encoding the List into the unknown Raw.
+ encodeList := func(writer io.Writer) (int, error) {
+ return streamingEncodeList(writer, listData, memAlloc)
+ }
+ _, err = unk.MarshalToWriter(w, listData.totalSize, encodeList)
+ return err
+}
+
+func streamingEncodeList(w io.Writer, listData streamingListData, memAlloc runtime.MemoryAllocator) (size int, err error) {
+ // ListMeta; 0xa = (1 << 3) | 2; field number: 1, type: 2 (LEN). https://protobuf.dev/programming-guides/encoding/#structure
+ n, err := doEncodeWithHeader(&listData.listMeta, w, 0xa, listData.listMetaSize, memAlloc)
+ size += n
+ if err != nil {
+ return size, err
+ }
+ // Items; 0x12 = (2 << 3) | 2; field number: 2, type: 2 (LEN). https://protobuf.dev/programming-guides/encoding/#structure
+ for i, item := range listData.items {
+ n, err := doEncodeWithHeader(item, w, 0x12, listData.itemsSizes[i], memAlloc)
+ size += n
+ if err != nil {
+ return size, err
+ }
+ }
+ return size, nil
+}
+
+func writeVarintGenerated(w io.Writer, v int) (int, error) {
+ buf := make([]byte, sovGenerated(uint64(v)))
+ encodeVarintGenerated(buf, len(buf), uint64(v))
+ return w.Write(buf)
+}
+
+// sovGenerated is copied from `generated.pb.go` returns size of varint.
+func sovGenerated(v uint64) int {
+ return (bits.Len64(v|1) + 6) / 7
+}
+
+// encodeVarintGenerated is copied from `generated.pb.go` encodes varint.
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
index 72d0ac79b3..381748d69f 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
@@ -15,4 +15,4 @@ limitations under the License.
*/
// Package protobuf provides a Kubernetes serializer for the protobuf format.
-package protobuf // import "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
+package protobuf
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
index c63e6dc63f..67a2d12474 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
@@ -23,8 +23,6 @@ import (
"net/http"
"reflect"
- "github.com/gogo/protobuf/proto"
-
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -72,10 +70,18 @@ func IsNotMarshalable(err error) bool {
// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written
// as-is (any type info passed with the object will be used).
func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
+ return NewSerializerWithOptions(creater, typer, SerializerOptions{})
+}
+
+// NewSerializerWithOptions creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer
+// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written
+// as-is (any type info passed with the object will be used).
+func NewSerializerWithOptions(creater runtime.ObjectCreater, typer runtime.ObjectTyper, opts SerializerOptions) *Serializer {
return &Serializer{
prefix: protoEncodingPrefix,
creater: creater,
typer: typer,
+ options: opts,
}
}
@@ -84,6 +90,14 @@ type Serializer struct {
prefix []byte
creater runtime.ObjectCreater
typer runtime.ObjectTyper
+
+ options SerializerOptions
+}
+
+// SerializerOptions holds the options which are used to configure a Proto serializer.
+type SerializerOptions struct {
+	// StreamingCollectionsEncoding enables encoding collections one item at a time, drastically reducing the memory needed.
+ StreamingCollectionsEncoding bool
}
var _ runtime.Serializer = &Serializer{}
@@ -132,11 +146,13 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
types, _, err := s.typer.ObjectKinds(into)
switch {
case runtime.IsNotRegisteredError(err):
- pb, ok := into.(proto.Message)
+ unmarshaler, ok := into.(unmarshaler)
if !ok {
return nil, &actual, errNotMarshalable{reflect.TypeOf(into)}
}
- if err := proto.Unmarshal(unk.Raw, pb); err != nil {
+ // top-level unmarshal resets before delegating unmarshaling to the object
+ unmarshaler.Reset()
+ if err := unmarshaler.Unmarshal(unk.Raw); err != nil {
return nil, &actual, err
}
return into, &actual, nil
@@ -209,6 +225,13 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.
},
}
}
+ if s.options.StreamingCollectionsEncoding {
+ listData, err := getStreamingListData(obj)
+ if err == nil {
+ // Doesn't honor custom proto marshaling methods (like json streaming), because all proto objects implement proto methods.
+ return streamingEncodeUnknownList(w, unk, listData, memAlloc)
+ }
+ }
switch t := obj.(type) {
case bufferedMarshaller:
@@ -228,7 +251,7 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.
_, err = w.Write(data[:prefixSize+uint64(i)])
return err
- case proto.Marshaler:
+ case unbufferedMarshaller:
// this path performs extra allocations
data, err := t.Marshal()
if err != nil {
@@ -283,16 +306,27 @@ func copyKindDefaults(dst, src *schema.GroupVersionKind) {
// bufferedMarshaller describes a more efficient marshalling interface that can avoid allocating multiple
// byte buffers by pre-calculating the size of the final buffer needed.
type bufferedMarshaller interface {
- proto.Sizer
runtime.ProtobufMarshaller
}
// Like bufferedMarshaller, but is able to marshal backwards, which is more efficient since it doesn't call Size() as frequently.
type bufferedReverseMarshaller interface {
- proto.Sizer
runtime.ProtobufReverseMarshaller
}
+type unbufferedMarshaller interface {
+ Marshal() ([]byte, error)
+}
+
+// unmarshaler is the subset of gogo Message and Unmarshaler used by unmarshal
+type unmarshaler interface {
+ // Reset() is called on the top-level message before unmarshaling,
+ // and clears all existing data from the message instance.
+ Reset()
+ // Unmarshal decodes from the start of the data into the message.
+ Unmarshal([]byte) error
+}
+
// estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown
// object with a nil RawJSON struct and the expected size of the provided buffer. The
// returned size will not be correct if RawJSON is set on unk.
@@ -358,11 +392,13 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind
types, _, err := s.typer.ObjectKinds(into)
switch {
case runtime.IsNotRegisteredError(err):
- pb, ok := into.(proto.Message)
+ unmarshaler, ok := into.(unmarshaler)
if !ok {
return nil, actual, errNotMarshalable{reflect.TypeOf(into)}
}
- if err := proto.Unmarshal(data, pb); err != nil {
+ // top-level unmarshal resets before delegating unmarshaling to the object
+ unmarshaler.Reset()
+ if err := unmarshaler.Unmarshal(data); err != nil {
return nil, actual, err
}
return into, actual, nil
@@ -396,11 +432,13 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater,
return nil, actual, err
}
- pb, ok := obj.(proto.Message)
+ unmarshaler, ok := obj.(unmarshaler)
if !ok {
return nil, actual, errNotMarshalable{reflect.TypeOf(obj)}
}
- if err := proto.Unmarshal(data, pb); err != nil {
+ // top-level unmarshal resets before delegating unmarshaling to the object
+ unmarshaler.Reset()
+ if err := unmarshaler.Unmarshal(data); err != nil {
return nil, actual, err
}
if actual != nil {
@@ -428,6 +466,39 @@ func (s *RawSerializer) encode(obj runtime.Object, w io.Writer, memAlloc runtime
}
func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
+ _, err := doEncode(obj, w, nil, memAlloc)
+ return err
+}
+
+func doEncodeWithHeader(obj any, w io.Writer, field byte, precomputedSize int, memAlloc runtime.MemoryAllocator) (size int, err error) {
+ // Field identifier
+ n, err := w.Write([]byte{field})
+ size += n
+ if err != nil {
+ return size, err
+ }
+ // Size
+ n, err = writeVarintGenerated(w, precomputedSize)
+ size += n
+ if err != nil {
+ return size, err
+ }
+ // Obj
+ n, err = doEncode(obj, w, &precomputedSize, memAlloc)
+ size += n
+ if err != nil {
+ return size, err
+ }
+ if n != precomputedSize {
+ return size, fmt.Errorf("the size value was %d, but doEncode wrote %d bytes to data", precomputedSize, n)
+ }
+ return size, nil
+}
+
+// doEncode encodes provided object into writer using a allocator if possible.
+// Avoids call by object Size if precomputedObjSize is provided.
+// precomputedObjSize should not include header bytes (field identifier, size).
+func doEncode(obj any, w io.Writer, precomputedObjSize *int, memAlloc runtime.MemoryAllocator) (int, error) {
if memAlloc == nil {
klog.Error("a mandatory memory allocator wasn't provided, this might have a negative impact on performance, check invocations of EncodeWithAllocator method, falling back on runtime.SimpleAllocator")
memAlloc = &runtime.SimpleAllocator{}
@@ -436,40 +507,43 @@ func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runti
case bufferedReverseMarshaller:
// this path performs a single allocation during write only when the Allocator wasn't provided
// it also requires the caller to implement the more efficient Size and MarshalToSizedBuffer methods
- encodedSize := uint64(t.Size())
- data := memAlloc.Allocate(encodedSize)
+ if precomputedObjSize == nil {
+ s := t.Size()
+ precomputedObjSize = &s
+ }
+ data := memAlloc.Allocate(uint64(*precomputedObjSize))
n, err := t.MarshalToSizedBuffer(data)
if err != nil {
- return err
+ return 0, err
}
- _, err = w.Write(data[:n])
- return err
+ return w.Write(data[:n])
case bufferedMarshaller:
// this path performs a single allocation during write only when the Allocator wasn't provided
// it also requires the caller to implement the more efficient Size and MarshalTo methods
- encodedSize := uint64(t.Size())
- data := memAlloc.Allocate(encodedSize)
+ if precomputedObjSize == nil {
+ s := t.Size()
+ precomputedObjSize = &s
+ }
+ data := memAlloc.Allocate(uint64(*precomputedObjSize))
n, err := t.MarshalTo(data)
if err != nil {
- return err
+ return 0, err
}
- _, err = w.Write(data[:n])
- return err
+ return w.Write(data[:n])
- case proto.Marshaler:
+ case unbufferedMarshaller:
// this path performs extra allocations
data, err := t.Marshal()
if err != nil {
- return err
+ return 0, err
}
- _, err = w.Write(data)
- return err
+ return w.Write(data)
default:
- return errNotMarshalable{reflect.TypeOf(obj)}
+ return 0, errNotMarshalable{reflect.TypeOf(obj)}
}
}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
index ce77c7910a..ca7b7cc2d4 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
@@ -43,9 +43,11 @@ type TypeMeta struct {
}
const (
- ContentTypeJSON string = "application/json"
- ContentTypeYAML string = "application/yaml"
- ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
+ ContentTypeJSON string = "application/json"
+ ContentTypeYAML string = "application/yaml"
+ ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
+ ContentTypeCBOR string = "application/cbor" // RFC 8949
+ ContentTypeCBORSequence string = "application/cbor-seq" // RFC 8742
)
// RawExtension is used to hold extensions in external versions.
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
index a82227b239..70c4ea8c56 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
@@ -18,16 +18,145 @@ package runtime
import (
"fmt"
+ "io"
)
+// ProtobufMarshaller can precompute size, and marshals to the start of the provided data buffer.
type ProtobufMarshaller interface {
+ // Size returns the number of bytes a call to MarshalTo would consume.
+ Size() int
+ // MarshalTo marshals to the start of the data buffer, which must be at least as big as Size(),
+ // and returns the number of bytes written, which must be identical to the return value of Size().
MarshalTo(data []byte) (int, error)
}
+// ProtobufReverseMarshaller can precompute size, and marshals to the end of the provided data buffer.
type ProtobufReverseMarshaller interface {
+ // Size returns the number of bytes a call to MarshalToSizedBuffer would consume.
+ Size() int
+ // MarshalToSizedBuffer marshals to the end of the data buffer, which must be at least as big as Size(),
+ // and returns the number of bytes written, which must be identical to the return value of Size().
MarshalToSizedBuffer(data []byte) (int, error)
}
+const (
+ typeMetaTag = 0xa
+ rawTag = 0x12
+ contentEncodingTag = 0x1a
+ contentTypeTag = 0x22
+
+ // max length of a varint for a uint64
+ maxUint64VarIntLength = 10
+)
+
+// MarshalToWriter allows a caller to provide a streaming writer for raw bytes,
+// instead of populating them inside the Unknown struct.
+// rawSize is the number of bytes writeRaw will write in a success case.
+// writeRaw is called when it is time to write the raw bytes. It must return `rawSize, nil` or an error.
+func (m *Unknown) MarshalToWriter(w io.Writer, rawSize int, writeRaw func(io.Writer) (int, error)) (int, error) {
+ size := 0
+
+ // reuse the buffer for varint marshaling
+ varintBuffer := make([]byte, maxUint64VarIntLength)
+ writeVarint := func(i int) (int, error) {
+ offset := encodeVarintGenerated(varintBuffer, len(varintBuffer), uint64(i))
+ return w.Write(varintBuffer[offset:])
+ }
+
+ // TypeMeta
+ {
+ n, err := w.Write([]byte{typeMetaTag})
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ typeMetaBytes, err := m.TypeMeta.Marshal()
+ if err != nil {
+ return size, err
+ }
+
+ n, err = writeVarint(len(typeMetaBytes))
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ n, err = w.Write(typeMetaBytes)
+ size += n
+ if err != nil {
+ return size, err
+ }
+ }
+
+ // Raw, delegating write to writeRaw()
+ {
+ n, err := w.Write([]byte{rawTag})
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ n, err = writeVarint(rawSize)
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ n, err = writeRaw(w)
+ size += n
+ if err != nil {
+ return size, err
+ }
+ if n != int(rawSize) {
+ return size, fmt.Errorf("the size value was %d, but encoding wrote %d bytes to data", rawSize, n)
+ }
+ }
+
+ // ContentEncoding
+ {
+ n, err := w.Write([]byte{contentEncodingTag})
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ n, err = writeVarint(len(m.ContentEncoding))
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ n, err = w.Write([]byte(m.ContentEncoding))
+ size += n
+ if err != nil {
+ return size, err
+ }
+ }
+
+ // ContentType
+ {
+ n, err := w.Write([]byte{contentTypeTag})
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ n, err = writeVarint(len(m.ContentType))
+ size += n
+ if err != nil {
+ return size, err
+ }
+
+ n, err = w.Write([]byte(m.ContentType))
+ size += n
+ if err != nil {
+ return size, err
+ }
+ }
+ return size, nil
+}
+
// NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown
// that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller.
func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) {
@@ -43,12 +172,12 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64
copy(data[i:], m.ContentType)
i = encodeVarintGenerated(data, i, uint64(len(m.ContentType)))
i--
- data[i] = 0x22
+ data[i] = contentTypeTag
i -= len(m.ContentEncoding)
copy(data[i:], m.ContentEncoding)
i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding)))
i--
- data[i] = 0x1a
+ data[i] = contentEncodingTag
if b != nil {
if r, ok := b.(ProtobufReverseMarshaller); ok {
n1, err := r.MarshalToSizedBuffer(data[:i])
@@ -75,7 +204,7 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64
}
i = encodeVarintGenerated(data, i, size)
i--
- data[i] = 0x12
+ data[i] = rawTag
}
n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i])
if err != nil {
@@ -84,6 +213,6 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64
i -= n2
i = encodeVarintGenerated(data, i, uint64(n2))
i--
- data[i] = 0xa
+ data[i] = typeMetaTag
return msgSize - i, nil
}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.model_name.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.model_name.go
new file mode 100644
index 0000000000..cf3ec4dceb
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.model_name.go
@@ -0,0 +1,92 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package runtime
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Allocator) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.Allocator"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NegotiateError) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.NegotiateError"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NoopDecoder) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.NoopDecoder"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NoopEncoder) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.NoopEncoder"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Pair) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.Pair"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RawExtension) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.RawExtension"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Scheme) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.Scheme"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SerializerInfo) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.SerializerInfo"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SimpleAllocator) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.SimpleAllocator"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StreamSerializerInfo) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.StreamSerializerInfo"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TypeMeta) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.TypeMeta"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Unknown) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.Unknown"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in WithVersionEncoder) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.WithVersionEncoder"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in WithoutVersionDecoder) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.WithoutVersionDecoder"
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/types/doc.go b/vendor/k8s.io/apimachinery/pkg/types/doc.go
index 5667fa9921..783cbcdc8d 100644
--- a/vendor/k8s.io/apimachinery/pkg/types/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/types/doc.go
@@ -15,4 +15,4 @@ limitations under the License.
*/
// Package types implements various generic types used throughout kubernetes.
-package types // import "k8s.io/apimachinery/pkg/types"
+package types
diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go
index fe8ecaaffa..d338cf213d 100644
--- a/vendor/k8s.io/apimachinery/pkg/types/patch.go
+++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go
@@ -25,5 +25,7 @@ const (
JSONPatchType PatchType = "application/json-patch+json"
MergePatchType PatchType = "application/merge-patch+json"
StrategicMergePatchType PatchType = "application/strategic-merge-patch+json"
- ApplyPatchType PatchType = "application/apply-patch+yaml"
+ ApplyPatchType PatchType = ApplyYAMLPatchType
+ ApplyYAMLPatchType PatchType = "application/apply-patch+yaml"
+ ApplyCBORPatchType PatchType = "application/apply-patch+cbor"
)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go
index 5d4d6250a3..b3b39bc388 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go
@@ -15,4 +15,4 @@ limitations under the License.
*/
// Package errors implements various utility functions and types around errors.
-package errors // import "k8s.io/apimachinery/pkg/util/errors"
+package errors
diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
index 1b60d145c6..6f458d13d7 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
@@ -24,6 +24,7 @@ import (
)
// MessageCountMap contains occurrence for each error message.
+// Deprecated: Not used anymore in the k8s.io codebase, use `errors.Join` instead.
type MessageCountMap map[string]int
// Aggregate represents an object that contains multiple errors, but does not
@@ -199,6 +200,7 @@ func Flatten(agg Aggregate) Aggregate {
}
// CreateAggregateFromMessageCountMap converts MessageCountMap Aggregate
+// Deprecated: Not used anymore in the k8s.io codebase, use `errors.Join` instead.
func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate {
if m == nil {
return nil
diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
index 9b3c9c8d5a..f18845a417 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
@@ -91,12 +91,12 @@ func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) {
}
n, err := io.ReadAtLeast(r.r, data[:max], int(max))
r.remaining -= n
- if err == io.ErrShortBuffer || r.remaining > 0 {
- return n, io.ErrShortBuffer
- }
if err != nil {
return n, err
}
+ if r.remaining > 0 {
+ return n, io.ErrShortBuffer
+ }
if n != expect {
return n, io.ErrUnexpectedEOF
}
@@ -147,7 +147,6 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) {
// RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see
// data written to data, or be larger than data and a different array.
- n := len(data)
m := json.RawMessage(data[:0])
if err := r.decoder.Decode(&m); err != nil {
return 0, err
@@ -156,12 +155,19 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) {
// If capacity of data is less than length of the message, decoder will allocate a new slice
// and set m to it, which means we need to copy the partial result back into data and preserve
// the remaining result for subsequent reads.
- if len(m) > n {
- //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here.
- data = append(data[0:0], m[:n]...)
- r.remaining = m[n:]
- return n, io.ErrShortBuffer
+ if len(m) > cap(data) {
+ copy(data, m)
+ r.remaining = m[len(data):]
+ return len(data), io.ErrShortBuffer
+ }
+
+ if len(m) > len(data) {
+ // The bytes beyond len(data) were stored in data's underlying array, which we do
+ // not own after this function returns.
+ r.remaining = append([]byte(nil), m[len(data):]...)
+ return len(data), io.ErrShortBuffer
}
+
return len(m), nil
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
index 5893df5bd2..1da83f14b1 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
// Package httpstream adds multiplexed streaming support to HTTP requests and
// responses via connection upgrades.
-package httpstream // import "k8s.io/apimachinery/pkg/util/httpstream"
+package httpstream
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
index a32fce5a0c..a7c8d897dc 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
@@ -116,6 +116,15 @@ func IsUpgradeFailure(err error) bool {
return errors.As(err, &upgradeErr)
}
+// IsHTTPSProxyError returns true if error is Gorilla/Websockets HTTPS Proxy dial error;
+// false otherwise (see https://github.com/kubernetes/kubernetes/issues/126134).
+func IsHTTPSProxyError(err error) bool {
+ if err == nil {
+ return false
+ }
+ return strings.Contains(err.Error(), "proxy: unknown scheme: https")
+}
+
// IsUpgradeRequest returns true if the given request is a connection upgrade request
func IsUpgradeRequest(req *http.Request) bool {
for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] {
@@ -151,19 +160,31 @@ func commaSeparatedHeaderValues(header []string) []string {
// Handshake performs a subprotocol negotiation. If the client did request a
// subprotocol, Handshake will select the first common value found in
-// serverProtocols. If a match is found, Handshake adds a response header
-// indicating the chosen subprotocol. If no match is found, HTTP forbidden is
-// returned, along with a response header containing the list of protocols the
-// server can accept.
+// serverProtocols, otherwise it will return an error and write an HTTP BadRequest to the response.
+// If a match is found, Handshake adds a response header indicating the chosen subprotocol.
+// If no match is found, HTTP forbidden is returned, along with a response header containing
+// the list of protocols the server can accept.
func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) {
- clientProtocols := commaSeparatedHeaderValues(req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)])
- if len(clientProtocols) == 0 {
- return "", fmt.Errorf("unable to upgrade: %s is required", HeaderProtocolVersion)
- }
-
if len(serverProtocols) == 0 {
panic(fmt.Errorf("unable to upgrade: serverProtocols is required"))
}
+ values, ok := req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)]
+ if !ok {
+ err := fmt.Errorf("unable to upgrade: header %s does not exist in request with %d headers", HeaderProtocolVersion, len(req.Header))
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return "", err
+ }
+ if len(values) == 0 {
+ err := fmt.Errorf("unable to upgrade: header %s is empty", HeaderProtocolVersion)
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return "", err
+ }
+ clientProtocols := commaSeparatedHeaderValues(values)
+ if len(clientProtocols) == 0 {
+ err := fmt.Errorf("unable to upgrade: header %s contains %s, but no valid protocols", HeaderProtocolVersion, values)
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return "", err
+ }
negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols)
if len(negotiatedProtocol) == 0 {
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
index c78326fa3b..ed131d112f 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
@@ -194,7 +194,7 @@ func (s *SpdyRoundTripper) dialWithHttpProxy(req *http.Request, proxyURL *url.UR
// proxying logic adapted from http://blog.h6t.eu/post/74098062923/golang-websocket-with-http-proxy-support
proxyReq := http.Request{
- Method: "CONNECT",
+ Method: http.MethodConnect,
URL: &url.URL{},
Host: targetHost,
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
index 8f9ced93fb..5be552e1eb 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
@@ -15,7 +15,7 @@ limitations under the License.
*/
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
+// source: k8s.io/apimachinery/pkg/util/intstr/generated.proto
package intstr
@@ -23,81 +23,10 @@ import (
fmt "fmt"
io "io"
- math "math"
math_bits "math/bits"
-
- proto "github.com/gogo/protobuf/proto"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *IntOrString) Reset() { *m = IntOrString{} }
-func (*IntOrString) ProtoMessage() {}
-func (*IntOrString) Descriptor() ([]byte, []int) {
- return fileDescriptor_94e046ae3ce6121c, []int{0}
-}
-func (m *IntOrString) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IntOrString) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IntOrString.Merge(m, src)
-}
-func (m *IntOrString) XXX_Size() int {
- return m.Size()
-}
-func (m *IntOrString) XXX_DiscardUnknown() {
- xxx_messageInfo_IntOrString.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IntOrString proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_94e046ae3ce6121c)
-}
-
-var fileDescriptor_94e046ae3ce6121c = []byte{
- // 292 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xb1, 0x4a, 0x03, 0x31,
- 0x1c, 0xc6, 0x13, 0x5b, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x58, 0x90, 0x5b, 0x4c,
- 0x56, 0x71, 0xec, 0x56, 0x10, 0x84, 0x56, 0x1c, 0xdc, 0xee, 0xda, 0x98, 0x86, 0x6b, 0x93, 0x90,
- 0xfb, 0x9f, 0x70, 0x5b, 0x1f, 0x41, 0x37, 0x47, 0x1f, 0xe7, 0xc6, 0x8e, 0x1d, 0xa4, 0x78, 0xf1,
- 0x2d, 0x9c, 0xe4, 0x72, 0x07, 0x3a, 0x3a, 0x25, 0xdf, 0xf7, 0xfd, 0x7e, 0x19, 0x12, 0xdc, 0xa6,
- 0xd7, 0x19, 0x95, 0x9a, 0xa5, 0x79, 0xc2, 0xad, 0xe2, 0xc0, 0x33, 0xf6, 0xcc, 0xd5, 0x42, 0x5b,
- 0xd6, 0x0e, 0xb1, 0x91, 0xeb, 0x78, 0xbe, 0x94, 0x8a, 0xdb, 0x82, 0x99, 0x54, 0xb0, 0x1c, 0xe4,
- 0x8a, 0x49, 0x05, 0x19, 0x58, 0x26, 0xb8, 0xe2, 0x36, 0x06, 0xbe, 0xa0, 0xc6, 0x6a, 0xd0, 0xfd,
- 0x51, 0x23, 0xd1, 0xbf, 0x12, 0x35, 0xa9, 0xa0, 0xb5, 0x44, 0x1b, 0xe9, 0xfc, 0x4a, 0x48, 0x58,
- 0xe6, 0x09, 0x9d, 0xeb, 0x35, 0x13, 0x5a, 0x68, 0xe6, 0xdd, 0x24, 0x7f, 0xf2, 0xc9, 0x07, 0x7f,
- 0x6b, 0xde, 0xbc, 0x78, 0xc5, 0xc1, 0xc9, 0x44, 0xc1, 0x9d, 0x9d, 0x81, 0x95, 0x4a, 0xf4, 0xa3,
- 0xa0, 0x0b, 0x85, 0xe1, 0x03, 0x1c, 0xe2, 0xa8, 0x33, 0x3e, 0x2b, 0xf7, 0x43, 0xe4, 0xf6, 0xc3,
- 0xee, 0x7d, 0x61, 0xf8, 0x77, 0x7b, 0x4e, 0x3d, 0xd1, 0xbf, 0x0c, 0x7a, 0x52, 0xc1, 0x43, 0xbc,
- 0x1a, 0x1c, 0x84, 0x38, 0x3a, 0x1c, 0x9f, 0xb6, 0x6c, 0x6f, 0xe2, 0xdb, 0x69, 0xbb, 0xd6, 0x5c,
- 0x06, 0xb6, 0xe6, 0x3a, 0x21, 0x8e, 0x8e, 0x7f, 0xb9, 0x99, 0x6f, 0xa7, 0xed, 0x7a, 0x73, 0xf4,
- 0xf6, 0x3e, 0x44, 0x9b, 0x8f, 0x10, 0x8d, 0x27, 0x65, 0x45, 0xd0, 0xb6, 0x22, 0x68, 0x57, 0x11,
- 0xb4, 0x71, 0x04, 0x97, 0x8e, 0xe0, 0xad, 0x23, 0x78, 0xe7, 0x08, 0xfe, 0x74, 0x04, 0xbf, 0x7c,
- 0x11, 0xf4, 0x38, 0xfa, 0xc7, 0x17, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0xdc, 0xc4, 0xf0, 0xa0,
- 0x81, 0x01, 0x00, 0x00,
-}
+func (m *IntOrString) Reset() { *m = IntOrString{} }
func (m *IntOrString) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
index 7c63c5e45a..e3d26a59a5 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
@@ -33,6 +33,7 @@ option go_package = "k8s.io/apimachinery/pkg/util/intstr";
// +protobuf=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.util.intstr
message IntOrString {
optional int64 type = 1;
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.protomessage.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.protomessage.pb.go
new file mode 100644
index 0000000000..2853a01830
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.protomessage.pb.go
@@ -0,0 +1,24 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package intstr
+
+func (*IntOrString) ProtoMessage() {}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go
index a502b5adb6..2d6f6a0ccc 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go
@@ -20,24 +20,24 @@ limitations under the License.
package intstr
import (
- fuzz "github.com/google/gofuzz"
+ "sigs.k8s.io/randfill"
)
-// Fuzz satisfies fuzz.Interface
-func (intstr *IntOrString) Fuzz(c fuzz.Continue) {
+// RandFill satisfies randfill.NativeSelfFiller
+func (intstr *IntOrString) RandFill(c randfill.Continue) {
if intstr == nil {
return
}
- if c.RandBool() {
+ if c.Bool() {
intstr.Type = Int
- c.Fuzz(&intstr.IntVal)
+ c.Fill(&intstr.IntVal)
intstr.StrVal = ""
} else {
intstr.Type = String
intstr.IntVal = 0
- c.Fuzz(&intstr.StrVal)
+ c.Fill(&intstr.StrVal)
}
}
// ensure IntOrString implements fuzz.Interface
-var _ fuzz.Interface = &IntOrString{}
+var _ randfill.NativeSelfFiller = &IntOrString{}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
index f358c794d1..f372ae589c 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
@@ -25,6 +25,7 @@ import (
"strconv"
"strings"
+ cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
"k8s.io/klog/v2"
)
@@ -37,6 +38,7 @@ import (
// +protobuf=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.util.intstr
type IntOrString struct {
Type Type `protobuf:"varint,1,opt,name=type,casttype=Type"`
IntVal int32 `protobuf:"varint,2,opt,name=intVal"`
@@ -92,6 +94,20 @@ func (intstr *IntOrString) UnmarshalJSON(value []byte) error {
return json.Unmarshal(value, &intstr.IntVal)
}
+func (intstr *IntOrString) UnmarshalCBOR(value []byte) error {
+ if err := cbor.Unmarshal(value, &intstr.StrVal); err == nil {
+ intstr.Type = String
+ return nil
+ }
+
+ if err := cbor.Unmarshal(value, &intstr.IntVal); err != nil {
+ return err
+ }
+
+ intstr.Type = Int
+ return nil
+}
+
// String returns the string value, or the Itoa of the int value.
func (intstr *IntOrString) String() string {
if intstr == nil {
@@ -126,6 +142,17 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) {
}
}
+func (intstr IntOrString) MarshalCBOR() ([]byte, error) {
+ switch intstr.Type {
+ case Int:
+ return cbor.Marshal(intstr.IntVal)
+ case String:
+ return cbor.Marshal(intstr.StrVal)
+ default:
+ return nil, fmt.Errorf("impossible IntOrString.Type")
+ }
+}
+
// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/zz_generated.model_name.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/zz_generated.model_name.go
new file mode 100644
index 0000000000..b2d6e0ae3c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/zz_generated.model_name.go
@@ -0,0 +1,27 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package intstr
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IntOrString) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.util.intstr.IntOrString"
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go
index d2ce66c1bd..aef858b26d 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go
@@ -20,8 +20,8 @@ import (
"bytes"
"fmt"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
- "sigs.k8s.io/structured-merge-diff/v4/typed"
+ "sigs.k8s.io/structured-merge-diff/v6/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/v6/typed"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go
index 978ffb3c3e..b1e621f390 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go
@@ -19,11 +19,12 @@ package managedfields
import (
"fmt"
+ "sigs.k8s.io/structured-merge-diff/v6/fieldpath"
+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/managedfields/internal"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
// FieldManager updates the managed fields and merges applied
@@ -32,7 +33,7 @@ type FieldManager = internal.FieldManager
// NewDefaultFieldManager creates a new FieldManager that merges apply requests
// and update managed fields for other types of requests.
-func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (*FieldManager, error) {
+func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (*FieldManager, error) {
f, err := internal.NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
if err != nil {
return nil, fmt.Errorf("failed to create field manager: %v", err)
@@ -43,7 +44,7 @@ func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime
// NewDefaultCRDFieldManager creates a new FieldManager specifically for
// CRDs. This allows for the possibility of fields which are not defined
// in models, as well as having no models defined at all.
-func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ *FieldManager, err error) {
+func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (_ *FieldManager, err error) {
f, err := internal.NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
if err != nil {
return nil, fmt.Errorf("failed to create field manager: %v", err)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go
index 408739c50f..89e4470548 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go
@@ -22,8 +22,8 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kube-openapi/pkg/schemaconv"
"k8s.io/kube-openapi/pkg/util/proto"
- smdschema "sigs.k8s.io/structured-merge-diff/v4/schema"
- "sigs.k8s.io/structured-merge-diff/v4/typed"
+ smdschema "sigs.k8s.io/structured-merge-diff/v6/schema"
+ "sigs.k8s.io/structured-merge-diff/v6/typed"
)
// groupVersionKindExtensionKey is the key used to lookup the
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go
index 8951932ba4..a9530ff2b4 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go
@@ -22,7 +22,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/v6/fieldpath"
)
type capManagersManager struct {
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go
index 8c044c9157..1f07b004de 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go
@@ -25,8 +25,8 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
- "sigs.k8s.io/structured-merge-diff/v4/merge"
+ "sigs.k8s.io/structured-merge-diff/v6/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/v6/merge"
)
// NewConflictError returns an error including details on the requests apply conflicts
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go
index eca04a7116..ac8d4279d6 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go
@@ -26,7 +26,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog/v2"
- "sigs.k8s.io/structured-merge-diff/v4/merge"
+ "sigs.k8s.io/structured-merge-diff/v6/merge"
)
// DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go
index 08186191a7..5c47b9c546 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go
@@ -21,7 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/v6/fieldpath"
)
// EmptyFields represents a set with no paths
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go
index 3f6cf88210..d58a1108dc 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go
@@ -24,8 +24,8 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
- "sigs.k8s.io/structured-merge-diff/v4/merge"
+ "sigs.k8s.io/structured-merge-diff/v6/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/v6/merge"
)
type lastAppliedManager struct {
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go
index 9b4c203262..bba2014e20 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go
@@ -24,7 +24,7 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/v6/fieldpath"
)
// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with each operation.
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go
index 376eed6b20..66215d87a1 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go
@@ -21,7 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/v6/fieldpath"
)
type managedFieldsUpdater struct {
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go
index 053936103d..78830d0cf5 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go
@@ -19,7 +19,7 @@ package internal
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/v6/fieldpath"
)
// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation.
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go
index 1954d65d32..1b5dddfd70 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go
@@ -23,8 +23,8 @@ import (
"strconv"
"strings"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
- "sigs.k8s.io/structured-merge-diff/v4/value"
+ "sigs.k8s.io/structured-merge-diff/v6/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/v6/value"
)
const (
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/runtimetypeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/runtimetypeconverter.go
new file mode 100644
index 0000000000..366ff73363
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/runtimetypeconverter.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+ "fmt"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/structured-merge-diff/v6/typed"
+)
+
+type schemeTypeConverter struct {
+ scheme *runtime.Scheme
+ parser *typed.Parser
+}
+
+var _ TypeConverter = &schemeTypeConverter{}
+
+// NewSchemeTypeConverter creates a TypeConverter that uses the provided scheme to
+// convert between runtime.Objects and TypedValues.
+func NewSchemeTypeConverter(scheme *runtime.Scheme, parser *typed.Parser) TypeConverter {
+ return &schemeTypeConverter{scheme: scheme, parser: parser}
+}
+
+func (tc schemeTypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) {
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ name, err := tc.scheme.ToOpenAPIDefinitionName(gvk)
+ if err != nil {
+ return nil, err
+ }
+ t := tc.parser.Type(name)
+ switch o := obj.(type) {
+ case *unstructured.Unstructured:
+ return t.FromUnstructured(o.UnstructuredContent(), opts...)
+ default:
+ return t.FromStructured(obj, opts...)
+ }
+}
+
+func (tc schemeTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) {
+ vu := value.AsValue().Unstructured()
+ switch o := vu.(type) {
+ case map[string]interface{}:
+ return &unstructured.Unstructured{Object: o}, nil
+ default:
+ return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu)
+ }
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go
index f24c040edd..15357a34d8 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go
@@ -80,6 +80,9 @@ func (f *skipNonAppliedManager) Apply(liveObj, appliedObj runtime.Object, manage
if err != nil {
return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", gvk, err)
}
+ if unstructured, isUnstructured := emptyObj.(runtime.Unstructured); isUnstructured {
+ unstructured.GetObjectKind().SetGroupVersionKind(gvk)
+ }
liveObj, managed, err = f.fieldManager.Update(emptyObj, liveObj, managed, f.beforeApplyManagerName)
if err != nil {
return nil, nil, fmt.Errorf("failed to create manager for existing fields: %v", err)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go
index 9b61f3a6f3..8a2b7e4e63 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go
@@ -20,7 +20,7 @@ import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/v6/fieldpath"
)
type stripMetaManager struct {
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go
index 786ad991c2..8e9a270108 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go
@@ -19,13 +19,14 @@ package internal
import (
"fmt"
+ "sigs.k8s.io/structured-merge-diff/v6/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/v6/merge"
+ "sigs.k8s.io/structured-merge-diff/v6/typed"
+
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
- "sigs.k8s.io/structured-merge-diff/v4/merge"
- "sigs.k8s.io/structured-merge-diff/v4/typed"
)
type structuredMergeManager struct {
@@ -41,7 +42,7 @@ var _ Manager = &structuredMergeManager{}
// NewStructuredMergeManager creates a new Manager that merges apply requests
// and update managed fields for other types of requests.
-func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (Manager, error) {
+func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (Manager, error) {
if typeConverter == nil {
return nil, fmt.Errorf("typeconverter must not be nil")
}
@@ -52,8 +53,8 @@ func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runt
groupVersion: gv,
hubVersion: hub,
updater: merge.Updater{
- Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s
- IgnoredFields: resetFields,
+ Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s
+ IgnoreFilter: resetFields,
},
}, nil
}
@@ -61,7 +62,7 @@ func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runt
// NewCRDStructuredMergeManager creates a new Manager specifically for
// CRDs. This allows for the possibility of fields which are not defined
// in models, as well as having no models defined at all.
-func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ Manager, err error) {
+func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (_ Manager, err error) {
return &structuredMergeManager{
typeConverter: typeConverter,
objectConverter: objectConverter,
@@ -69,8 +70,8 @@ func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter r
groupVersion: gv,
hubVersion: hub,
updater: merge.Updater{
- Converter: newCRDVersionConverter(typeConverter, objectConverter, hub),
- IgnoredFields: resetFields,
+ Converter: newCRDVersionConverter(typeConverter, objectConverter, hub),
+ IgnoreFilter: resetFields,
},
}, nil
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go
index c6449467cf..cbefc2eba0 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go
@@ -24,9 +24,9 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kube-openapi/pkg/schemaconv"
"k8s.io/kube-openapi/pkg/validation/spec"
- smdschema "sigs.k8s.io/structured-merge-diff/v4/schema"
- "sigs.k8s.io/structured-merge-diff/v4/typed"
- "sigs.k8s.io/structured-merge-diff/v4/value"
+ smdschema "sigs.k8s.io/structured-merge-diff/v6/schema"
+ "sigs.k8s.io/structured-merge-diff/v6/typed"
+ "sigs.k8s.io/structured-merge-diff/v6/value"
)
// TypeConverter allows you to convert from runtime.Object to
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go
index 45855fa4ca..86695e0c0b 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go
@@ -21,9 +21,9 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
- "sigs.k8s.io/structured-merge-diff/v4/merge"
- "sigs.k8s.io/structured-merge-diff/v4/typed"
+ "sigs.k8s.io/structured-merge-diff/v6/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/v6/merge"
+ "sigs.k8s.io/structured-merge-diff/v6/typed"
)
// versionConverter is an implementation of
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml
index 66e849f23f..a7f2d54fdf 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml
@@ -120,7 +120,7 @@ status:
type: PIDPressure
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
lastTransitionTime: "2019-07-09T16:17:49Z"
- message: kubelet is posting ready status. AppArmor enabled
+ message: kubelet is posting ready status
reason: KubeletReady
status: "True"
type: Ready
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go
index 48b774cece..ca96ca9834 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go
@@ -24,7 +24,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/managedfields/internal"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/v6/fieldpath"
)
var (
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go
index d031eefaa3..e706ac8221 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go
@@ -17,6 +17,9 @@ limitations under the License.
package managedfields
import (
+ "sigs.k8s.io/structured-merge-diff/v6/typed"
+
+ "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/managedfields/internal"
"k8s.io/kube-openapi/pkg/validation/spec"
)
@@ -45,3 +48,9 @@ func NewDeducedTypeConverter() TypeConverter {
func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) {
return internal.NewTypeConverter(openapiSpec, preserveUnknownFields)
}
+
+// NewSchemeTypeConverter creates a TypeConverter that uses the provided scheme to
+// convert between runtime.Objects and TypedValues.
+func NewSchemeTypeConverter(scheme *runtime.Scheme, parser *typed.Parser) TypeConverter {
+ return internal.NewSchemeTypeConverter(scheme, parser)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go
index d14ecfad54..ea710f6b15 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go
@@ -15,4 +15,4 @@ limitations under the License.
*/
// Package proxy provides transport and upgrade support for proxies.
-package proxy // import "k8s.io/apimachinery/pkg/util/proxy"
+package proxy
diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go
index 76acdfb4ac..8c30a366de 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go
@@ -36,6 +36,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"github.com/mxk/go-flowrate/flowrate"
+
"k8s.io/klog/v2"
)
@@ -336,6 +337,7 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques
clone.Host = h.Location.Host
}
clone.URL = &location
+ klog.V(6).Infof("UpgradeAwareProxy: dialing for SPDY upgrade with headers: %v", clone.Header)
backendConn, err = h.DialForUpgrade(clone)
if err != nil {
klog.V(6).Infof("Proxy connection error: %v", err)
@@ -370,13 +372,13 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques
// hijacking should be the last step in the upgrade.
requestHijacker, ok := w.(http.Hijacker)
if !ok {
- klog.V(6).Infof("Unable to hijack response writer: %T", w)
+ klog.Errorf("Unable to hijack response writer: %T", w)
h.Responder.Error(w, req, fmt.Errorf("request connection cannot be hijacked: %T", w))
return true
}
requestHijackedConn, _, err := requestHijacker.Hijack()
if err != nil {
- klog.V(6).Infof("Unable to hijack response: %v", err)
+ klog.Errorf("Unable to hijack response: %v", err)
h.Responder.Error(w, req, fmt.Errorf("error hijacking connection: %v", err))
return true
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
index 3674914f70..cc09bdbc43 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
@@ -17,6 +17,7 @@ limitations under the License.
package runtime
import (
+ "context"
"fmt"
"net/http"
"runtime"
@@ -35,7 +36,12 @@ var (
)
// PanicHandlers is a list of functions which will be invoked when a panic happens.
-var PanicHandlers = []func(interface{}){logPanic}
+//
+// The code invoking these handlers prepares a contextual logger so that
+// klog.FromContext(ctx) already skips over the panic handler itself and
+// several other intermediate functions, ideally such that the log output
+// is attributed to the code which triggered the panic.
+var PanicHandlers = []func(context.Context, interface{}){logPanic}
// HandleCrash simply catches a crash and logs an error. Meant to be called via
// defer. Additional context-specific handlers can be provided, and will be
@@ -43,23 +49,74 @@ var PanicHandlers = []func(interface{}){logPanic}
// handlers and logging the panic message.
//
// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
+//
+// Contextual logging: HandleCrashWithContext or HandleCrashWithLogger should be used instead of HandleCrash in code which supports contextual logging.
func HandleCrash(additionalHandlers ...func(interface{})) {
if r := recover(); r != nil {
- for _, fn := range PanicHandlers {
- fn(r)
- }
- for _, fn := range additionalHandlers {
- fn(r)
- }
- if ReallyCrash {
- // Actually proceed to panic.
- panic(r)
+ additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers))
+ for i, handler := range additionalHandlers {
+ handler := handler // capture loop variable
+ additionalHandlersWithContext[i] = func(_ context.Context, r interface{}) {
+ handler(r)
+ }
}
+
+ handleCrash(context.Background(), r, additionalHandlersWithContext...)
+ }
+}
+
+// HandleCrashWithContext simply catches a crash and logs an error. Meant to be called via
+// defer. Additional context-specific handlers can be provided, and will be
+// called in case of panic. HandleCrash actually crashes, after calling the
+// handlers and logging the panic message.
+//
+// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
+//
+// The context is used to determine how to log.
+func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(context.Context, interface{})) {
+ if r := recover(); r != nil {
+ handleCrash(ctx, r, additionalHandlers...)
+ }
+}
+
+// HandleCrashWithLogger simply catches a crash and logs an error. Meant to be called via
+// defer. Additional context-specific handlers can be provided, and will be
+// called in case of panic. HandleCrash actually crashes, after calling the
+// handlers and logging the panic message.
+//
+// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
+func HandleCrashWithLogger(logger klog.Logger, additionalHandlers ...func(context.Context, interface{})) {
+ if r := recover(); r != nil {
+ ctx := klog.NewContext(context.Background(), logger)
+ handleCrash(ctx, r, additionalHandlers...)
+ }
+}
+
+// handleCrash is the common implementation of the HandleCrash* variants.
+// Having those call a common implementation ensures that the stack depth
+// is the same regardless through which path the handlers get invoked.
+func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) {
+ // We don't really know how many call frames to skip because the Go
+ // panic handler is between us and the code where the panic occurred.
+ // If it's one function (as in Go 1.21), then skipping four levels
+ // gets us to the function which called the `defer HandleCrashWithContext(...)`.
+ logger := klog.FromContext(ctx).WithCallDepth(4)
+ ctx = klog.NewContext(ctx, logger)
+
+ for _, fn := range PanicHandlers {
+ fn(ctx, r)
+ }
+ for _, fn := range additionalHandlers {
+ fn(ctx, r)
+ }
+ if ReallyCrash {
+ // Actually proceed to panic.
+ panic(r)
}
}
// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler).
-func logPanic(r interface{}) {
+func logPanic(ctx context.Context, r interface{}) {
if r == http.ErrAbortHandler {
// honor the http.ErrAbortHandler sentinel panic value:
// ErrAbortHandler is a sentinel panic value to abort a handler.
@@ -73,10 +130,16 @@ func logPanic(r interface{}) {
const size = 64 << 10
stacktrace := make([]byte, size)
stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
+
+ logger := klog.FromContext(ctx)
+
+ // For backwards compatibility, conversion to string
+ // is handled here instead of deferring to the logging
+ // backend.
if _, ok := r.(string); ok {
- klog.Errorf("Observed a panic: %s\n%s", r, stacktrace)
+ logger.Error(nil, "Observed a panic", "panic", r, "stacktrace", string(stacktrace))
} else {
- klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace)
+ logger.Error(nil, "Observed a panic", "panic", fmt.Sprintf("%v", r), "panicGoValue", fmt.Sprintf("%#v", r), "stacktrace", string(stacktrace))
}
}
@@ -84,35 +147,90 @@ func logPanic(r interface{}) {
// error occurs.
// TODO(lavalamp): for testability, this and the below HandleError function
// should be packaged up into a testable and reusable object.
-var ErrorHandlers = []func(error){
+var ErrorHandlers = []ErrorHandler{
logError,
- (&rudimentaryErrorBackoff{
- lastErrorTime: time.Now(),
- // 1ms was the number folks were able to stomach as a global rate limit.
- // If you need to log errors more than 1000 times a second you
- // should probably consider fixing your code instead. :)
- minPeriod: time.Millisecond,
- }).OnError,
+ // 1ms was the number folks were able to stomach as a global rate limit.
+ // If you need to log errors more than 1000 times a second, you
+ // should probably consider fixing your code instead. :)
+ backoffError(1 * time.Millisecond),
}
+type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues ...interface{})
+
// HandlerError is a method to invoke when a non-user facing piece of code cannot
// return an error and needs to indicate it has been ignored. Invoking this method
// is preferable to logging the error - the default behavior is to log but the
// errors may be sent to a remote server for analysis.
+//
+// Contextual logging: HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging.
func HandleError(err error) {
// this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead
if err == nil {
return
}
+ handleError(context.Background(), err, "Unhandled Error")
+}
+
+// HandleErrorWithContext is a method to invoke when a non-user facing piece of code cannot
+// return an error and needs to indicate it has been ignored. Invoking this method
+// is preferable to logging the error - the default behavior is to log but the
+// errors may be sent to a remote server for analysis. The context is used to
+// determine how to log the error.
+//
+// If contextual logging is enabled, the default log output is equivalent to
+//
+// logr.FromContext(ctx).WithName("UnhandledError").Error(err, msg, keysAndValues...)
+//
+// Without contextual logging, it is equivalent to:
+//
+// klog.ErrorS(err, msg, keysAndValues...)
+//
+// In contrast to HandleError, passing nil for the error is still going to
+// trigger a log entry. Don't construct a new error or wrap an error
+// with fmt.Errorf. Instead, add additional information via the message
+// and key/value pairs.
+//
+// This variant should be used instead of HandleError because it supports
+// structured, contextual logging. Alternatively, [HandleErrorWithLogger] can
+// be used if a logger is available instead of a context.
+func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
+ handleError(ctx, err, msg, keysAndValues...)
+}
+
+// HandleErrorWithLogger is an alternative to [HandleErrorWithContext] which accepts
+// a logger for contextual logging.
+func HandleErrorWithLogger(logger klog.Logger, err error, msg string, keysAndValues ...interface{}) {
+ handleError(klog.NewContext(context.Background(), logger), err, msg, keysAndValues...)
+}
+
+// handleError is the common implementation of the HandleError* variants.
+// Using this common implementation ensures that the stack depth
+// is the same regardless through which path the handlers get invoked.
+func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
for _, fn := range ErrorHandlers {
- fn(err)
+ fn(ctx, err, msg, keysAndValues...)
}
}
-// logError prints an error with the call stack of the location it was reported
-func logError(err error) {
- klog.ErrorDepth(2, err)
+// logError prints an error with the call stack of the location it was reported.
+// It expects to be called as -> HandleError[WithContext] -> handleError -> logError.
+func logError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
+ logger := klog.FromContext(ctx).WithCallDepth(3)
+ logger = klog.LoggerWithName(logger, "UnhandledError")
+ logger.Error(err, msg, keysAndValues...) //nolint:logcheck // logcheck complains about unknown key/value pairs.
+}
+
+// backoffError blocks if it is called more often than the minPeriod.
+func backoffError(minPeriod time.Duration) ErrorHandler {
+ r := &rudimentaryErrorBackoff{
+ lastErrorTime: time.Now(),
+ minPeriod: minPeriod,
+ }
+
+ return func(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
+ r.OnError()
+ }
}
type rudimentaryErrorBackoff struct {
@@ -125,7 +243,7 @@ type rudimentaryErrorBackoff struct {
// OnError will block if it is called more often than the embedded period time.
// This will prevent overly tight hot error loops.
-func (r *rudimentaryErrorBackoff) OnError(error) {
+func (r *rudimentaryErrorBackoff) OnError() {
now := time.Now() // start the timer before acquiring the lock
r.lastErrorTimeLock.Lock()
d := now.Sub(r.lastErrorTime)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go b/vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go
deleted file mode 100644
index 443dac62eb..0000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package sets
-
-// ordered is a constraint that permits any ordered type: any type
-// that supports the operators < <= >= >.
-// If future releases of Go add new ordered types,
-// this constraint will be modified to include them.
-type ordered interface {
- integer | float | ~string
-}
-
-// integer is a constraint that permits any integer type.
-// If future releases of Go add new predeclared integer types,
-// this constraint will be modified to include them.
-type integer interface {
- signed | unsigned
-}
-
-// float is a constraint that permits any floating-point type.
-// If future releases of Go add new predeclared floating-point types,
-// this constraint will be modified to include them.
-type float interface {
- ~float32 | ~float64
-}
-
-// signed is a constraint that permits any signed integer type.
-// If future releases of Go add new predeclared signed integer types,
-// this constraint will be modified to include them.
-type signed interface {
- ~int | ~int8 | ~int16 | ~int32 | ~int64
-}
-
-// unsigned is a constraint that permits any unsigned integer type.
-// If future releases of Go add new predeclared unsigned integer types,
-// this constraint will be modified to include them.
-type unsigned interface {
- ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
index d50526f426..ae3d15eb25 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
@@ -17,7 +17,8 @@ limitations under the License.
package sets
import (
- "sort"
+ "cmp"
+ "slices"
)
// Set is a set of the same type elements, implemented via map[comparable]struct{} for minimal memory consumption.
@@ -37,7 +38,7 @@ func New[T comparable](items ...T) Set[T] {
// KeySet creates a Set from a keys of a map[comparable](? extends interface{}).
// If the value passed in is not actually a map, this will panic.
func KeySet[T comparable, V any](theMap map[T]V) Set[T] {
- ret := Set[T]{}
+ ret := make(Set[T], len(theMap))
for keyValue := range theMap {
ret.Insert(keyValue)
}
@@ -67,14 +68,8 @@ func (s Set[T]) Delete(items ...T) Set[T] {
// Clear empties the set.
// It is preferable to replace the set with a newly constructed set,
// but not all callers can do that (when there are other references to the map).
-// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN
-// can't be cleared because NaN can't be removed.
-// For sets containing items of a type that is reflexive for ==,
-// this is optimized to a single call to runtime.mapclear().
func (s Set[T]) Clear() Set[T] {
- for key := range s {
- delete(s, key)
- }
+ clear(s)
return s
}
@@ -193,22 +188,13 @@ func (s1 Set[T]) Equal(s2 Set[T]) bool {
return len(s1) == len(s2) && s1.IsSuperset(s2)
}
-type sortableSliceOfGeneric[T ordered] []T
-
-func (g sortableSliceOfGeneric[T]) Len() int { return len(g) }
-func (g sortableSliceOfGeneric[T]) Less(i, j int) bool { return less[T](g[i], g[j]) }
-func (g sortableSliceOfGeneric[T]) Swap(i, j int) { g[i], g[j] = g[j], g[i] }
-
// List returns the contents as a sorted T slice.
//
// This is a separate function and not a method because not all types supported
// by Generic are ordered and only those can be sorted.
-func List[T ordered](s Set[T]) []T {
- res := make(sortableSliceOfGeneric[T], 0, len(s))
- for key := range s {
- res = append(res, key)
- }
- sort.Sort(res)
+func List[T cmp.Ordered](s Set[T]) []T {
+ res := s.UnsortedList()
+ slices.Sort(res)
return res
}
@@ -235,7 +221,3 @@ func (s Set[T]) PopAny() (T, bool) {
func (s Set[T]) Len() int {
return len(s)
}
-
-func less[T ordered](lhs, rhs T) bool {
- return lhs < rhs
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go
index 85b0cfc072..1bfed1c2ec 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go
@@ -249,7 +249,7 @@ var _ LookupPatchMeta = PatchMetaFromOpenAPI{}
func (s PatchMetaFromOpenAPI) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) {
if s.Schema == nil {
- return nil, PatchMeta{}, nil
+ return &PatchMetaFromOpenAPI{}, PatchMeta{}, nil
}
kindItem := NewKindItem(key, s.Schema.GetPath())
s.Schema.Accept(kindItem)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
index 920c113bbd..6825a808e6 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
@@ -1361,6 +1361,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me
// original. Otherwise, check if we want to preserve it or skip it.
// Preserving the null value is useful when we want to send an explicit
// delete to the API server.
+ // In some cases, this may lead to inconsistent behavior with create.
+ // ref: https://github.com/kubernetes/kubernetes/issues/123304
+ // To avoid breaking compatibility,
+ // we made corresponding changes on the client side to ensure that the create and patch behaviors are idempotent.
if patchV == nil {
delete(original, k)
if mergeOptions.IgnoreUnmatchedNulls {
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS
new file mode 100644
index 0000000000..4023732476
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS
@@ -0,0 +1,11 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+# Disable inheritance as this is an api owners file
+options:
+ no_parent_owners: true
+approvers:
+ - api-approvers
+reviewers:
+ - api-reviewers
+labels:
+ - kind/api-change
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go
new file mode 100644
index 0000000000..f0264e50c7
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go
@@ -0,0 +1,321 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package field
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+)
+
+// NormalizationRule holds a pre-compiled regular expression and its replacement string
+// for normalizing field paths.
+type NormalizationRule struct {
+ Regexp *regexp.Regexp
+ Replacement string
+}
+
+// ErrorMatcher is a helper for comparing Error objects.
+type ErrorMatcher struct {
+ // TODO(thockin): consider whether type is ever NOT required, maybe just
+ // assume it.
+ matchType bool
+ // TODO(thockin): consider whether field could be assumed - if the
+ // "want" error has a nil field, don't match on field.
+ matchField bool
+ // TODO(thockin): consider whether value could be assumed - if the
+ // "want" error has a nil value, don't match on value.
+ matchValue bool
+ matchOrigin bool
+ matchDetail func(want, got string) bool
+ requireOriginWhenInvalid bool
+ // normalizationRules holds the pre-compiled regex patterns for path normalization.
+ normalizationRules []NormalizationRule
+}
+
+// Matches returns true if the two Error objects match according to the
+// configured criteria. When field normalization is configured, only the
+// "got" error's field path is normalized (to bring older API versions up
+// to the internal/latest format), while "want" is assumed to already be
+// in the canonical internal API format.
+func (m ErrorMatcher) Matches(want, got *Error) bool {
+ if m.matchType && want.Type != got.Type {
+ return false
+ }
+ if m.matchField {
+ // Try direct match first (common case)
+ if want.Field != got.Field {
+ // Fields don't match, try normalization if rules are configured.
+ // Only normalize "got" - it may be from an older API version that
+ // needs to be brought up to the internal/latest format that "want"
+ // is already in.
+ if want.Field != m.normalizePath(got.Field) {
+ return false
+ }
+ }
+ }
+
+ if m.matchValue && !reflect.DeepEqual(want.BadValue, got.BadValue) {
+ return false
+ }
+ if m.matchOrigin {
+ if want.Origin != got.Origin {
+ return false
+ }
+ if m.requireOriginWhenInvalid && want.Type == ErrorTypeInvalid {
+ if want.Origin == "" || got.Origin == "" {
+ return false
+ }
+ }
+ }
+ if m.matchDetail != nil && !m.matchDetail(want.Detail, got.Detail) {
+ return false
+ }
+ return true
+}
+
+// normalizePath applies configured path normalization rules.
+func (m ErrorMatcher) normalizePath(path string) string {
+ for _, rule := range m.normalizationRules {
+ normalized := rule.Regexp.ReplaceAllString(path, rule.Replacement)
+ if normalized != path {
+ // Only apply the first matching rule.
+ return normalized
+ }
+ }
+ return path
+}
+
+// Render returns a string representation of the specified Error object,
+// according to the criteria configured in the ErrorMatcher.
+func (m ErrorMatcher) Render(e *Error) string {
+ buf := strings.Builder{}
+
+ comma := func() {
+ if buf.Len() > 0 {
+ buf.WriteString(", ")
+ }
+ }
+
+ if m.matchType {
+ comma()
+ buf.WriteString(fmt.Sprintf("Type=%q", e.Type))
+ }
+ if m.matchField {
+ comma()
+ if normalized := m.normalizePath(e.Field); normalized != e.Field {
+ buf.WriteString(fmt.Sprintf("Field=%q (aka %q)", normalized, e.Field))
+ } else {
+ buf.WriteString(fmt.Sprintf("Field=%q", e.Field))
+ }
+ }
+ if m.matchValue {
+ comma()
+ if s, ok := e.BadValue.(string); ok {
+ buf.WriteString(fmt.Sprintf("Value=%q", s))
+ } else {
+ rv := reflect.ValueOf(e.BadValue)
+ if rv.Kind() == reflect.Pointer && !rv.IsNil() {
+ rv = rv.Elem()
+ }
+ if rv.IsValid() && rv.CanInterface() {
+ buf.WriteString(fmt.Sprintf("Value=%v", rv.Interface()))
+ } else {
+ buf.WriteString(fmt.Sprintf("Value=%v", e.BadValue))
+ }
+ }
+ }
+ if m.matchOrigin || m.requireOriginWhenInvalid && e.Type == ErrorTypeInvalid {
+ comma()
+ buf.WriteString(fmt.Sprintf("Origin=%q", e.Origin))
+ }
+ if m.matchDetail != nil {
+ comma()
+ buf.WriteString(fmt.Sprintf("Detail=%q", e.Detail))
+ }
+ return "{" + buf.String() + "}"
+}
+
+// Exactly returns a derived ErrorMatcher which matches all fields exactly.
+func (m ErrorMatcher) Exactly() ErrorMatcher {
+ return m.ByType().ByField().ByValue().ByOrigin().ByDetailExact()
+}
+
+// ByType returns a derived ErrorMatcher which also matches by type.
+func (m ErrorMatcher) ByType() ErrorMatcher {
+ m.matchType = true
+ return m
+}
+
+// ByField returns a derived ErrorMatcher which also matches by field path.
+// If you need to mutate the field path (e.g. to normalize across versions),
+// see ByFieldNormalized.
+func (m ErrorMatcher) ByField() ErrorMatcher {
+ m.matchField = true
+ return m
+}
+
+// ByFieldNormalized returns a derived ErrorMatcher which also matches by field path
+// after applying normalization rules to the actual (got) error's field path.
+// This allows matching field paths from older API versions against the canonical
+// internal API format.
+//
+// The normalization rules are applied ONLY to the "got" error's field path, bringing
+// older API version field paths up to the latest/internal format. The "want" error
+// is assumed to always be in the internal API format (latest).
+//
+// The rules slice holds pre-compiled regular expressions and their replacement strings.
+//
+// Example:
+//
+// rules := []NormalizationRule{
+// {
+// Regexp: regexp.MustCompile(`spec\.devices\.requests\[(\d+)\]\.allocationMode`),
+// Replacement: "spec.devices.requests[$1].exactly.allocationMode",
+// },
+// }
+// matcher := ErrorMatcher{}.ByFieldNormalized(rules)
+func (m ErrorMatcher) ByFieldNormalized(rules []NormalizationRule) ErrorMatcher {
+ m.matchField = true
+ m.normalizationRules = rules
+ return m
+}
+
+// ByValue returns a derived ErrorMatcher which also matches by the errant
+// value.
+func (m ErrorMatcher) ByValue() ErrorMatcher {
+ m.matchValue = true
+ return m
+}
+
+// ByOrigin returns a derived ErrorMatcher which also matches by the origin.
+// When this is used and an origin is set in the error, the matcher will
+// consider all expected errors with the same origin to be a match. The only
+// expception to this is when it finds two errors which are exactly identical,
+// which is too suspicious to ignore. This multi-matching allows tests to
+// express a single expectation ("I set the X field to an invalid value, and I
+// expect an error from origin Y") without having to know exactly how many
+// errors might be returned, or in what order, or with what wording.
+func (m ErrorMatcher) ByOrigin() ErrorMatcher {
+ m.matchOrigin = true
+ return m
+}
+
+// RequireOriginWhenInvalid returns a derived ErrorMatcher which also requires
+// the Origin field to be set when the Type is Invalid and the matcher is
+// matching by Origin.
+func (m ErrorMatcher) RequireOriginWhenInvalid() ErrorMatcher {
+ m.requireOriginWhenInvalid = true
+ return m
+}
+
+// ByDetailExact returns a derived ErrorMatcher which also matches errors by
+// the exact detail string.
+func (m ErrorMatcher) ByDetailExact() ErrorMatcher {
+ m.matchDetail = func(want, got string) bool {
+ return got == want
+ }
+ return m
+}
+
+// ByDetailSubstring returns a derived ErrorMatcher which also matches errors
+// by a substring of the detail string.
+func (m ErrorMatcher) ByDetailSubstring() ErrorMatcher {
+ m.matchDetail = func(want, got string) bool {
+ return strings.Contains(got, want)
+ }
+ return m
+}
+
+// ByDetailRegexp returns a derived ErrorMatcher which also matches errors by a
+// regular expression of the detail string, where the "want" string is assumed
+// to be a valid regular expression.
+func (m ErrorMatcher) ByDetailRegexp() ErrorMatcher {
+ m.matchDetail = func(want, got string) bool {
+ return regexp.MustCompile(want).MatchString(got)
+ }
+ return m
+}
+
+// TestIntf lets users pass a testing.T while not coupling this package to Go's
+// testing package.
+type TestIntf interface {
+ Helper()
+ Errorf(format string, args ...any)
+}
+
+// Test compares two ErrorLists by the criteria configured in this matcher, and
+// fails the test if they don't match. The "want" errors are expected to be in
+// the internal API format (latest), while "got" errors may be from any API version
+// and will be normalized if field normalization rules are configured.
+//
+// If matching by origin is enabled and the error has a non-empty origin, a given
+// "want" error can match multiple "got" errors, and they will all be consumed.
+// The only exception to this is if the matcher got multiple identical (in every way,
+// even those not being matched on) errors, which is likely to indicate a bug.
+func (m ErrorMatcher) Test(tb TestIntf, want, got ErrorList) {
+ tb.Helper()
+
+ exactly := m.Exactly() // makes a copy
+
+ // If we ever find an EXACT duplicate error, it's almost certainly a bug
+ // worth reporting. If we ever find a use-case where this is not a bug, we
+ // can revisit this assumption.
+ seen := map[string]bool{}
+ for _, g := range got {
+ key := exactly.Render(g)
+ if seen[key] {
+ tb.Errorf("exact duplicate error:\n%s", key)
+ }
+ seen[key] = true
+ }
+
+ remaining := got
+ for _, w := range want {
+ tmp := make(ErrorList, 0, len(remaining))
+ matched := false
+ for i, g := range remaining {
+ if m.Matches(w, g) {
+ matched = true
+ if m.matchOrigin && w.Origin != "" {
+ // When origin is included in the match, we allow multiple
+ // matches against the same wanted error, so that tests
+ // can be insulated from the exact number, order, and
+ // wording of cases that might return more than one error.
+ continue
+ } else {
+ // Single-match, save the rest of the "got" errors and move
+ // on to the next "want" error.
+ tmp = append(tmp, remaining[i+1:]...)
+ break
+ }
+ } else {
+ tmp = append(tmp, g)
+ }
+ }
+ if !matched {
+ tb.Errorf("expected an error matching:\n%s", m.Render(w))
+ }
+ remaining = tmp
+ }
+ if len(remaining) > 0 {
+ for _, e := range remaining {
+ tb.Errorf("unmatched error:\n%s", exactly.Render(e))
+ }
+ }
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
index bc387d0116..950d838682 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
@@ -17,8 +17,8 @@ limitations under the License.
package field
import (
+ "encoding/json"
"fmt"
- "reflect"
"strconv"
"strings"
@@ -33,13 +33,35 @@ type Error struct {
Field string
BadValue interface{}
Detail string
+
+ // Origin uniquely identifies where this error was generated from. It is used in testing to
+ // compare expected errors against actual errors without relying on exact detail string matching.
+ // This allows tests to verify the correct validation logic triggered the error
+ // regardless of how the error message might be formatted or localized.
+ //
+ // The value should be either:
+ // - A simple camelCase identifier (e.g., "maximum", "maxItems")
+ // - A structured format using "format=" for validation errors related to specific formats
+ // (e.g., "format=dns-label", "format=qualified-name")
+ //
+ // If the Origin corresponds to an existing declarative validation tag or JSON Schema keyword,
+ // use that same name for consistency.
+ //
+ // Origin should be set in the most deeply nested validation function that
+ // can still identify the unique source of the error.
+ Origin string
+
+ // CoveredByDeclarative is true when this error is covered by declarative
+ // validation. This field is to identify errors from imperative validation
+ // that should also be caught by declarative validation.
+ CoveredByDeclarative bool
}
var _ error = &Error{}
// Error implements the error interface.
-func (v *Error) Error() string {
- return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody())
+func (e *Error) Error() string {
+ return fmt.Sprintf("%s: %s", e.Field, e.ErrorBody())
}
type OmitValueType struct{}
@@ -48,54 +70,64 @@ var omitValue = OmitValueType{}
// ErrorBody returns the error message without the field name. This is useful
// for building nice-looking higher-level error reporting.
-func (v *Error) ErrorBody() string {
+func (e *Error) ErrorBody() string {
var s string
- switch {
- case v.Type == ErrorTypeRequired:
- s = v.Type.String()
- case v.Type == ErrorTypeForbidden:
- s = v.Type.String()
- case v.Type == ErrorTypeTooLong:
- s = v.Type.String()
- case v.Type == ErrorTypeInternal:
- s = v.Type.String()
- case v.BadValue == omitValue:
- s = v.Type.String()
- default:
- value := v.BadValue
- valueType := reflect.TypeOf(value)
- if value == nil || valueType == nil {
- value = "null"
- } else if valueType.Kind() == reflect.Pointer {
- if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() {
- value = "null"
- } else {
- value = reflectValue.Elem().Interface()
- }
+ switch e.Type {
+ case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal:
+ s = e.Type.String()
+ case ErrorTypeInvalid, ErrorTypeTypeInvalid, ErrorTypeNotSupported,
+ ErrorTypeNotFound, ErrorTypeDuplicate, ErrorTypeTooMany:
+ if e.BadValue == omitValue {
+ s = e.Type.String()
+ break
}
- switch t := value.(type) {
+ switch t := e.BadValue.(type) {
case int64, int32, float64, float32, bool:
// use simple printer for simple types
- s = fmt.Sprintf("%s: %v", v.Type, value)
+ s = fmt.Sprintf("%s: %v", e.Type, t)
case string:
- s = fmt.Sprintf("%s: %q", v.Type, t)
- case fmt.Stringer:
- // anything that defines String() is better than raw struct
- s = fmt.Sprintf("%s: %s", v.Type, t.String())
+ s = fmt.Sprintf("%s: %q", e.Type, t)
default:
- // fallback to raw struct
- // TODO: internal types have panic guards against json.Marshalling to prevent
- // accidental use of internal types in external serialized form. For now, use
- // %#v, although it would be better to show a more expressive output in the future
- s = fmt.Sprintf("%s: %#v", v.Type, value)
+ // use more complex techniques to render more complex types
+ valstr := ""
+ jb, err := json.Marshal(e.BadValue)
+ if err == nil {
+ // best case
+ valstr = string(jb)
+ } else if stringer, ok := e.BadValue.(fmt.Stringer); ok {
+ // anything that defines String() is better than raw struct
+ valstr = stringer.String()
+ } else {
+ // worst case - fallback to raw struct
+ // TODO: internal types have panic guards against json.Marshalling to prevent
+ // accidental use of internal types in external serialized form. For now, use
+ // %#v, although it would be better to show a more expressive output in the future
+ valstr = fmt.Sprintf("%#v", e.BadValue)
+ }
+ s = fmt.Sprintf("%s: %s", e.Type, valstr)
}
+ default:
+ internal := InternalError(nil, fmt.Errorf("unhandled error code: %s: please report this", e.Type))
+ s = internal.ErrorBody()
}
- if len(v.Detail) != 0 {
- s += fmt.Sprintf(": %s", v.Detail)
+ if len(e.Detail) != 0 {
+ s += fmt.Sprintf(": %s", e.Detail)
}
return s
}
+// WithOrigin adds origin information to the FieldError
+func (e *Error) WithOrigin(o string) *Error {
+ e.Origin = o
+ return e
+}
+
+// MarkCoveredByDeclarative marks the error as covered by declarative validation.
+func (e *Error) MarkCoveredByDeclarative() *Error {
+ e.CoveredByDeclarative = true
+ return e
+}
+
// ErrorType is a machine readable value providing more detail about why
// a field is invalid. These values are expected to match 1-1 with
// CauseType in api/types.go.
@@ -163,38 +195,38 @@ func (t ErrorType) String() string {
case ErrorTypeTypeInvalid:
return "Invalid value"
default:
- panic(fmt.Sprintf("unrecognized validation error: %q", string(t)))
+ return fmt.Sprintf("<unknown error %q>", string(t))
}
}
// TypeInvalid returns a *Error indicating "type is invalid"
func TypeInvalid(field *Path, value interface{}, detail string) *Error {
- return &Error{ErrorTypeTypeInvalid, field.String(), value, detail}
+ return &Error{ErrorTypeTypeInvalid, field.String(), value, detail, "", false}
}
// NotFound returns a *Error indicating "value not found". This is
// used to report failure to find a requested value (e.g. looking up an ID).
func NotFound(field *Path, value interface{}) *Error {
- return &Error{ErrorTypeNotFound, field.String(), value, ""}
+ return &Error{ErrorTypeNotFound, field.String(), value, "", "", false}
}
// Required returns a *Error indicating "value required". This is used
// to report required values that are not provided (e.g. empty strings, null
// values, or empty arrays).
func Required(field *Path, detail string) *Error {
- return &Error{ErrorTypeRequired, field.String(), "", detail}
+ return &Error{ErrorTypeRequired, field.String(), "", detail, "", false}
}
// Duplicate returns a *Error indicating "duplicate value". This is
// used to report collisions of values that must be unique (e.g. names or IDs).
func Duplicate(field *Path, value interface{}) *Error {
- return &Error{ErrorTypeDuplicate, field.String(), value, ""}
+ return &Error{ErrorTypeDuplicate, field.String(), value, "", "", false}
}
// Invalid returns a *Error indicating "invalid value". This is used
// to report malformed values (e.g. failed regex match, too long, out of bounds).
func Invalid(field *Path, value interface{}, detail string) *Error {
- return &Error{ErrorTypeInvalid, field.String(), value, detail}
+ return &Error{ErrorTypeInvalid, field.String(), value, detail, "", false}
}
// NotSupported returns a *Error indicating "unsupported value".
@@ -209,7 +241,7 @@ func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *E
}
detail = "supported values: " + strings.Join(quotedValues, ", ")
}
- return &Error{ErrorTypeNotSupported, field.String(), value, detail}
+ return &Error{ErrorTypeNotSupported, field.String(), value, detail, "", false}
}
// Forbidden returns a *Error indicating "forbidden". This is used to
@@ -217,29 +249,31 @@ func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *E
// some conditions, but which are not permitted by current conditions (e.g.
// security policy).
func Forbidden(field *Path, detail string) *Error {
- return &Error{ErrorTypeForbidden, field.String(), "", detail}
+ return &Error{ErrorTypeForbidden, field.String(), "", detail, "", false}
}
-// TooLong returns a *Error indicating "too long". This is used to
-// report that the given value is too long. This is similar to
-// Invalid, but the returned error will not include the too-long
-// value.
-func TooLong(field *Path, value interface{}, maxLength int) *Error {
- return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)}
-}
-
-// TooLongMaxLength returns a *Error indicating "too long". This is used to
-// report that the given value is too long. This is similar to
-// Invalid, but the returned error will not include the too-long
-// value. If maxLength is negative, no max length will be included in the message.
-func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error {
+// TooLong returns a *Error indicating "too long". This is used to report that
+// the given value is too long. This is similar to Invalid, but the returned
+// error will not include the too-long value. If maxLength is negative, it will
+// be included in the message. The value argument is not used.
+func TooLong(field *Path, _ interface{}, maxLength int) *Error {
var msg string
if maxLength >= 0 {
- msg = fmt.Sprintf("may not be longer than %d", maxLength)
+ bs := "bytes"
+ if maxLength == 1 {
+ bs = "byte"
+ }
+ msg = fmt.Sprintf("may not be more than %d %s", maxLength, bs)
} else {
msg = "value is too long"
}
- return &Error{ErrorTypeTooLong, field.String(), value, msg}
+ return &Error{ErrorTypeTooLong, field.String(), "", msg, "", false}
+}
+
+// TooLongMaxLength returns a *Error indicating "too long".
+// Deprecated: Use TooLong instead.
+func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error {
+ return TooLong(field, "", maxLength)
}
// TooMany returns a *Error indicating "too many". This is used to
@@ -249,7 +283,11 @@ func TooMany(field *Path, actualQuantity, maxQuantity int) *Error {
var msg string
if maxQuantity >= 0 {
- msg = fmt.Sprintf("must have at most %d items", maxQuantity)
+ is := "items"
+ if maxQuantity == 1 {
+ is = "item"
+ }
+ msg = fmt.Sprintf("must have at most %d %s", maxQuantity, is)
} else {
msg = "has too many items"
}
@@ -261,14 +299,14 @@ func TooMany(field *Path, actualQuantity, maxQuantity int) *Error {
actual = omitValue
}
- return &Error{ErrorTypeTooMany, field.String(), actual, msg}
+ return &Error{ErrorTypeTooMany, field.String(), actual, msg, "", false}
}
// InternalError returns a *Error indicating "internal error". This is used
// to signal that an error was found that was not directly related to user
// input. The err argument must be non-nil.
func InternalError(field *Path, err error) *Error {
- return &Error{ErrorTypeInternal, field.String(), nil, err.Error()}
+ return &Error{ErrorTypeInternal, field.String(), nil, err.Error(), "", false}
}
// ErrorList holds a set of Errors. It is plausible that we might one day have
@@ -287,6 +325,30 @@ func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher {
}
}
+// WithOrigin sets the origin for all errors in the list and returns the updated list.
+func (list ErrorList) WithOrigin(origin string) ErrorList {
+ for _, err := range list {
+ err.Origin = origin
+ }
+ return list
+}
+
+// MarkCoveredByDeclarative marks all errors in the list as covered by declarative validation.
+func (list ErrorList) MarkCoveredByDeclarative() ErrorList {
+ for _, err := range list {
+ err.CoveredByDeclarative = true
+ }
+ return list
+}
+
+// PrefixDetail adds a prefix to the Detail for all errors in the list and returns the updated list.
+func (list ErrorList) PrefixDetail(prefix string) ErrorList {
+ for _, err := range list {
+ err.Detail = prefix + err.Detail
+ }
+ return list
+}
+
// ToAggregate converts the ErrorList into an errors.Aggregate.
func (list ErrorList) ToAggregate() utilerrors.Aggregate {
if len(list) == 0 {
@@ -323,3 +385,25 @@ func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList {
// FilterOut takes an Aggregate and returns an Aggregate
return fromAggregate(err.(utilerrors.Aggregate))
}
+
+// ExtractCoveredByDeclarative returns a new ErrorList containing only the errors that should be covered by declarative validation.
+func (list ErrorList) ExtractCoveredByDeclarative() ErrorList {
+ newList := ErrorList{}
+ for _, err := range list {
+ if err.CoveredByDeclarative {
+ newList = append(newList, err)
+ }
+ }
+ return newList
+}
+
+// RemoveCoveredByDeclarative returns a new ErrorList containing only the errors that should not be covered by declarative validation.
+func (list ErrorList) RemoveCoveredByDeclarative() ErrorList {
+ newList := ErrorList{}
+ for _, err := range list {
+ if !err.CoveredByDeclarative {
+ newList = append(newList, err)
+ }
+ }
+ return newList
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/ip.go b/vendor/k8s.io/apimachinery/pkg/util/validation/ip.go
new file mode 100644
index 0000000000..6e947c74c9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/ip.go
@@ -0,0 +1,278 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "fmt"
+ "net"
+ "net/netip"
+ "slices"
+
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/klog/v2"
+ netutils "k8s.io/utils/net"
+)
+
+func parseIP(fldPath *field.Path, value string, strictValidation bool) (net.IP, field.ErrorList) {
+ var allErrors field.ErrorList
+
+ ip := netutils.ParseIPSloppy(value)
+ if ip == nil {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"))
+ return nil, allErrors
+ }
+
+ if strictValidation {
+ addr, err := netip.ParseAddr(value)
+ if err != nil {
+ // If netutils.ParseIPSloppy parsed it, but netip.ParseAddr
+ // doesn't, then it must have illegal leading 0s.
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have leading 0s"))
+ }
+ if addr.Is4In6() {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must not be an IPv4-mapped IPv6 address"))
+ }
+ }
+
+ return ip, allErrors
+}
+
+// IsValidIPForLegacyField tests that the argument is a valid IP address for a "legacy"
+// API field that predates strict IP validation. In particular, this allows IPs that are
+// not in canonical form (e.g., "FE80:0:0:0:0:0:0:0abc" instead of "fe80::abc").
+//
+// If strictValidation is false, this also allows IPs in certain invalid or ambiguous
+// formats:
+//
+// 1. IPv4 IPs are allowed to have leading "0"s in octets (e.g. "010.002.003.004").
+// Historically, net.ParseIP (and later netutils.ParseIPSloppy) simply ignored leading
+// "0"s in IPv4 addresses, but most libc-based software treats 0-prefixed IPv4 octets
+// as octal, meaning different software might interpret the same string as two
+// different IPs, potentially leading to security issues. (Current net.ParseIP and
+// netip.ParseAddr simply reject inputs with leading "0"s.)
+//
+// 2. IPv4-mapped IPv6 IPs (e.g. "::ffff:1.2.3.4") are allowed. These can also lead to
+// different software interpreting the value in different ways, because they may be
+// treated as IPv4 by some software and IPv6 by other software. (net.ParseIP and
+// netip.ParseAddr both allow these, but there are no use cases for representing IPv4
+// addresses as IPv4-mapped IPv6 addresses in Kubernetes.)
+//
+// Alternatively, when validating an update to an existing field, you can pass a list of
+// IP values from the old object that should be accepted if they appear in the new object
+// even if they are not valid.
+//
+// This function should only be used to validate the existing fields that were
+// historically validated in this way, and strictValidation should be true unless the
+// StrictIPCIDRValidation feature gate is disabled. Use IsValidIP for parsing new fields.
+func IsValidIPForLegacyField(fldPath *field.Path, value string, strictValidation bool, validOldIPs []string) field.ErrorList {
+ if slices.Contains(validOldIPs, value) {
+ return nil
+ }
+ _, allErrors := parseIP(fldPath, value, strictValidation)
+ return allErrors.WithOrigin("format=ip-sloppy")
+}
+
+// IsValidIP tests that the argument is a valid IP address, according to current
+// Kubernetes standards for IP address validation.
+func IsValidIP(fldPath *field.Path, value string) field.ErrorList {
+ ip, allErrors := parseIP(fldPath, value, true)
+ if len(allErrors) != 0 {
+ return allErrors.WithOrigin("format=ip-strict")
+ }
+
+ if value != ip.String() {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", ip.String())))
+ }
+ return allErrors.WithOrigin("format=ip-strict")
+}
+
+// GetWarningsForIP returns warnings for IP address values in non-standard forms. This
+// should only be used with fields that are validated with IsValidIPForLegacyField().
+func GetWarningsForIP(fldPath *field.Path, value string) []string {
+ ip := netutils.ParseIPSloppy(value)
+ if ip == nil {
+ klog.ErrorS(nil, "GetWarningsForIP called on value that was not validated with IsValidIPForLegacyField", "field", fldPath, "value", value)
+ return nil
+ }
+
+ addr, _ := netip.ParseAddr(value)
+ if !addr.IsValid() || addr.Is4In6() {
+ // This catches 2 cases: leading 0s (if ParseIPSloppy() accepted it but
+ // ParseAddr() doesn't) or IPv4-mapped IPv6 (.Is4In6()). Either way,
+ // re-stringifying the net.IP value will give the preferred form.
+ return []string{
+ fmt.Sprintf("%s: non-standard IP address %q will be considered invalid in a future Kubernetes release: use %q", fldPath, value, ip.String()),
+ }
+ }
+
+ // If ParseIPSloppy() and ParseAddr() both accept it then it's fully valid, though
+ // it may be non-canonical.
+ if addr.Is6() && addr.String() != value {
+ return []string{
+ fmt.Sprintf("%s: IPv6 address %q should be in RFC 5952 canonical format (%q)", fldPath, value, addr.String()),
+ }
+ }
+
+ return nil
+}
+
+func parseCIDR(fldPath *field.Path, value string, strictValidation bool) (*net.IPNet, field.ErrorList) {
+ var allErrors field.ErrorList
+
+ _, ipnet, err := netutils.ParseCIDRSloppy(value)
+ if err != nil {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)"))
+ return nil, allErrors
+ }
+
+ if strictValidation {
+ prefix, err := netip.ParsePrefix(value)
+ if err != nil {
+ // If netutils.ParseCIDRSloppy parsed it, but netip.ParsePrefix
+ // doesn't, then it must have illegal leading 0s (either in the
+ // IP part or the prefix).
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have leading 0s in IP or prefix length"))
+ } else if prefix.Addr().Is4In6() {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have an IPv4-mapped IPv6 address"))
+ } else if prefix.Addr() != prefix.Masked().Addr() {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have bits set beyond the prefix length"))
+ }
+ }
+
+ return ipnet, allErrors
+}
+
+// IsValidCIDRForLegacyField tests that the argument is a valid CIDR value for a "legacy"
+// API field that predates strict IP validation. In particular, this allows IPs that are
+// not in canonical form (e.g., "FE80:0abc:0:0:0:0:0:0/64" instead of "fe80:abc::/64").
+//
+// If strictValidation is false, this also allows CIDR values in certain invalid or
+// ambiguous formats:
+//
+// 1. The IP part of the CIDR value is parsed as with IsValidIPForLegacyField with
+// strictValidation=false.
+//
+// 2. The CIDR value is allowed to be either a "subnet"/"mask" (with the lower bits after
+// the prefix length all being 0), or an "interface address" as with `ip addr` (with a
+// complete IP address and associated subnet length). With strict validation, the
+// value is required to be in "subnet"/"mask" form.
+//
+// 3. The prefix length is allowed to have leading 0s.
+//
+// Alternatively, when validating an update to an existing field, you can pass a list of
+// CIDR values from the old object that should be accepted if they appear in the new
+// object even if they are not valid.
+//
+// This function should only be used to validate the existing fields that were
+// historically validated in this way, and strictValidation should be true unless the
+// StrictIPCIDRValidation feature gate is disabled. Use IsValidCIDR or
+// IsValidInterfaceAddress for parsing new fields.
+func IsValidCIDRForLegacyField(fldPath *field.Path, value string, strictValidation bool, validOldCIDRs []string) field.ErrorList {
+ if slices.Contains(validOldCIDRs, value) {
+ return nil
+ }
+
+ _, allErrors := parseCIDR(fldPath, value, strictValidation)
+ return allErrors
+}
+
+// IsValidCIDR tests that the argument is a valid CIDR value, according to current
+// Kubernetes standards for CIDR validation. This function is only for
+// "subnet"/"mask"-style CIDR values (e.g., "192.168.1.0/24", with no bits set beyond the
+// prefix length). Use IsValidInterfaceAddress for "ifaddr"-style CIDR values.
+func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList {
+ ipnet, allErrors := parseCIDR(fldPath, value, true)
+ if len(allErrors) != 0 {
+ return allErrors
+ }
+
+ if value != ipnet.String() {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", ipnet.String())))
+ }
+ return allErrors
+}
+
+// GetWarningsForCIDR returns warnings for CIDR values in non-standard forms. This should
+// only be used with fields that are validated with IsValidCIDRForLegacyField().
+func GetWarningsForCIDR(fldPath *field.Path, value string) []string {
+ ip, ipnet, err := netutils.ParseCIDRSloppy(value)
+ if err != nil {
+ klog.ErrorS(err, "GetWarningsForCIDR called on value that was not validated with IsValidCIDRForLegacyField", "field", fldPath, "value", value)
+ return nil
+ }
+
+ var warnings []string
+
+ // Check for bits set after prefix length
+ if !ip.Equal(ipnet.IP) {
+ _, addrlen := ipnet.Mask.Size()
+ singleIPCIDR := fmt.Sprintf("%s/%d", ip.String(), addrlen)
+ warnings = append(warnings,
+ fmt.Sprintf("%s: CIDR value %q is ambiguous in this context (should be %q or %q?)", fldPath, value, ipnet.String(), singleIPCIDR),
+ )
+ }
+
+ prefix, _ := netip.ParsePrefix(value)
+ addr := prefix.Addr()
+ if !prefix.IsValid() || addr.Is4In6() {
+ // This catches 2 cases: leading 0s (if ParseCIDRSloppy() accepted it but
+ // ParsePrefix() doesn't) or IPv4-mapped IPv6 (.Is4In6()). Either way,
+ // re-stringifying the net.IPNet value will give the preferred form.
+ warnings = append(warnings,
+ fmt.Sprintf("%s: non-standard CIDR value %q will be considered invalid in a future Kubernetes release: use %q", fldPath, value, ipnet.String()),
+ )
+ }
+
+ // If ParseCIDRSloppy() and ParsePrefix() both accept it then it's fully valid,
+ // though it may be non-canonical. But only check this if there are no other
+ // warnings, since either of the other warnings would also cause a round-trip
+ // failure.
+ if len(warnings) == 0 && addr.Is6() && prefix.String() != value {
+ warnings = append(warnings,
+ fmt.Sprintf("%s: IPv6 CIDR value %q should be in RFC 5952 canonical format (%q)", fldPath, value, prefix.String()),
+ )
+ }
+
+ return warnings
+}
+
+// IsValidInterfaceAddress tests that the argument is a valid "ifaddr"-style CIDR value in
+// canonical form (e.g., "192.168.1.5/24", with a complete IP address and associated
+// subnet length). Use IsValidCIDR for "subnet"/"mask"-style CIDR values (e.g.,
+// "192.168.1.0/24").
+func IsValidInterfaceAddress(fldPath *field.Path, value string) field.ErrorList {
+ var allErrors field.ErrorList
+ ip, ipnet, err := netutils.ParseCIDRSloppy(value)
+ if err != nil {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid address in CIDR form, (e.g. 10.9.8.7/24 or 2001:db8::1/64)"))
+ return allErrors
+ }
+
+ // The canonical form of `value` is not `ipnet.String()`, because `ipnet` doesn't
+ // include the bits after the prefix. We need to construct the canonical form
+ // ourselves from `ip` and `ipnet.Mask`.
+ maskSize, _ := ipnet.Mask.Size()
+ if netutils.IsIPv4(ip) && maskSize > net.IPv4len*8 {
+ // "::ffff:192.168.0.1/120" -> "192.168.0.1/24"
+ maskSize -= (net.IPv6len - net.IPv4len) * 8
+ }
+ canonical := fmt.Sprintf("%s/%d", ip.String(), maskSize)
+ if value != canonical {
+ allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", canonical)))
+ }
+ return allErrors
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
index 0b8a6cb354..352ff19ae5 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
@@ -19,57 +19,21 @@ package validation
import (
"fmt"
"math"
- "net"
"regexp"
- "strconv"
"strings"
+ "unicode"
+
+ "k8s.io/apimachinery/pkg/api/validate/content"
"k8s.io/apimachinery/pkg/util/validation/field"
- netutils "k8s.io/utils/net"
)
-const qnameCharFmt string = "[A-Za-z0-9]"
-const qnameExtCharFmt string = "[-A-Za-z0-9_.]"
-const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt
-const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
-const qualifiedNameMaxLength int = 63
-
-var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$")
-
// IsQualifiedName tests whether the value passed is what Kubernetes calls a
// "qualified name". This is a format used in various places throughout the
// system. If the value is not valid, a list of error strings is returned.
// Otherwise an empty list (or nil) is returned.
-func IsQualifiedName(value string) []string {
- var errs []string
- parts := strings.Split(value, "/")
- var name string
- switch len(parts) {
- case 1:
- name = parts[0]
- case 2:
- var prefix string
- prefix, name = parts[0], parts[1]
- if len(prefix) == 0 {
- errs = append(errs, "prefix part "+EmptyError())
- } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 {
- errs = append(errs, prefixEach(msgs, "prefix part ")...)
- }
- default:
- return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+
- " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')")
- }
-
- if len(name) == 0 {
- errs = append(errs, "name part "+EmptyError())
- } else if len(name) > qualifiedNameMaxLength {
- errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength))
- }
- if !qualifiedNameRegexp.MatchString(name) {
- errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc"))
- }
- return errs
-}
+// Deprecated: Use k8s.io/apimachinery/pkg/api/validate/content.IsQualifiedName instead.
+var IsQualifiedName = content.IsLabelKey
// IsFullyQualifiedName checks if the name is fully qualified. This is similar
// to IsFullyQualifiedDomainName but requires a minimum of 3 segments instead of
@@ -153,29 +117,44 @@ func IsDomainPrefixedPath(fldPath *field.Path, dpPath string) field.ErrorList {
return allErrs
}
-const labelValueFmt string = "(" + qualifiedNameFmt + ")?"
-const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
+// IsDomainPrefixedKey checks if the given key string is a domain-prefixed key
+// (e.g. acme.io/foo). All characters before the first "/" must be a valid
+// subdomain as defined by RFC 1123. All characters trailing the first "/" must
+// be non-empty and match the regex ^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$.
+func IsDomainPrefixedKey(fldPath *field.Path, key string) field.ErrorList {
+ var allErrs field.ErrorList
+ if len(key) == 0 {
+ return append(allErrs, field.Required(fldPath, ""))
+ }
+ for _, errMessages := range content.IsLabelKey(key) {
+ allErrs = append(allErrs, field.Invalid(fldPath, key, errMessages))
+ }
-// LabelValueMaxLength is a label's max length
-const LabelValueMaxLength int = 63
+ if len(allErrs) > 0 {
+ return allErrs
+ }
-var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$")
+ segments := strings.Split(key, "/")
+ if len(segments) != 2 {
+ return append(allErrs, field.Invalid(fldPath, key, "must be a domain-prefixed key (such as \"acme.io/foo\")"))
+ }
+
+ return allErrs
+}
+
+// LabelValueMaxLength is a label's max length
+// Deprecated: Use k8s.io/apimachinery/pkg/api/validate/content.LabelValueMaxLength instead.
+const LabelValueMaxLength int = content.LabelValueMaxLength
// IsValidLabelValue tests whether the value passed is a valid label value. If
// the value is not valid, a list of error strings is returned. Otherwise an
// empty list (or nil) is returned.
-func IsValidLabelValue(value string) []string {
- var errs []string
- if len(value) > LabelValueMaxLength {
- errs = append(errs, MaxLenError(LabelValueMaxLength))
- }
- if !labelValueRegexp.MatchString(value) {
- errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345"))
- }
- return errs
-}
+// Deprecated: Use k8s.io/apimachinery/pkg/api/validate/content.IsLabelValue instead.
+var IsValidLabelValue = content.IsLabelValue
const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
+const dns1123LabelFmtWithUnderscore string = "_?[a-z0-9]([-_a-z0-9]*[a-z0-9])?"
+
const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character"
// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123)
@@ -205,10 +184,14 @@ func IsDNS1123Label(value string) []string {
const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*"
const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"
+const dns1123SubdomainFmtWithUnderscore string = dns1123LabelFmtWithUnderscore + "(\\." + dns1123LabelFmtWithUnderscore + ")*"
+const dns1123SubdomainErrorMsgFG string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '_', '-' or '.', and must start and end with an alphanumeric character"
+
// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123)
const DNS1123SubdomainMaxLength int = 253
var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$")
+var dns1123SubdomainRegexpWithUnderscore = regexp.MustCompile("^" + dns1123SubdomainFmtWithUnderscore + "$")
// IsDNS1123Subdomain tests for a string that conforms to the definition of a
// subdomain in DNS (RFC 1123).
@@ -223,6 +206,19 @@ func IsDNS1123Subdomain(value string) []string {
return errs
}
+// IsDNS1123SubdomainWithUnderscore tests for a string that conforms to the definition of a
+// subdomain in DNS (RFC 1123), but allows the use of an underscore in the string
+func IsDNS1123SubdomainWithUnderscore(value string) []string {
+ var errs []string
+ if len(value) > DNS1123SubdomainMaxLength {
+ errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+ }
+ if !dns1123SubdomainRegexpWithUnderscore.MatchString(value) {
+ errs = append(errs, RegexError(dns1123SubdomainErrorMsgFG, dns1123SubdomainFmtWithUnderscore, "example.com"))
+ }
+ return errs
+}
+
const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?"
const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character"
@@ -266,19 +262,10 @@ func IsWildcardDNS1123Subdomain(value string) []string {
return errs
}
-const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*"
-const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'"
-
-var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$")
-
// IsCIdentifier tests for a string that conforms the definition of an identifier
// in C. This checks the format, but not the length.
-func IsCIdentifier(value string) []string {
- if !cIdentifierRegexp.MatchString(value) {
- return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")}
- }
- return nil
-}
+// Deprecated: Use k8s.io/apimachinery/pkg/api/validate/content.IsCIdentifier instead.
+var IsCIdentifier = content.IsCIdentifier
// IsValidPortNum tests that the argument is a valid, non-zero port number.
func IsValidPortNum(port int) []string {
@@ -351,34 +338,6 @@ func IsValidPortName(port string) []string {
return errs
}
-// IsValidIP tests that the argument is a valid IP address.
-func IsValidIP(value string) []string {
- if netutils.ParseIPSloppy(value) == nil {
- return []string{"must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"}
- }
- return nil
-}
-
-// IsValidIPv4Address tests that the argument is a valid IPv4 address.
-func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList {
- var allErrors field.ErrorList
- ip := netutils.ParseIPSloppy(value)
- if ip == nil || ip.To4() == nil {
- allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address"))
- }
- return allErrors
-}
-
-// IsValidIPv6Address tests that the argument is a valid IPv6 address.
-func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList {
- var allErrors field.ErrorList
- ip := netutils.ParseIPSloppy(value)
- if ip == nil || ip.To4() != nil {
- allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address"))
- }
- return allErrors
-}
-
const percentFmt string = "[0-9]+%"
const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'"
@@ -409,6 +368,9 @@ func IsHTTPHeaderName(value string) []string {
const envVarNameFmt = "[-._a-zA-Z][-._a-zA-Z0-9]*"
const envVarNameFmtErrMsg string = "a valid environment variable name must consist of alphabetic characters, digits, '_', '-', or '.', and must not start with a digit"
+// TODO(hirazawaui): Rename this when the RelaxedEnvironmentVariableValidation gate is removed.
+const relaxedEnvVarNameFmtErrMsg string = "a valid environment variable name must consist only of printable ASCII characters other than '='"
+
var envVarNameRegexp = regexp.MustCompile("^" + envVarNameFmt + "$")
// IsEnvVarName tests if a string is a valid environment variable name.
@@ -422,6 +384,24 @@ func IsEnvVarName(value string) []string {
return errs
}
+// IsRelaxedEnvVarName tests if a string is a valid environment variable name.
+func IsRelaxedEnvVarName(value string) []string {
+ var errs []string
+
+ if len(value) == 0 {
+ errs = append(errs, "environment variable name "+EmptyError())
+ }
+
+ for _, r := range value {
+ if r > unicode.MaxASCII || !unicode.IsPrint(r) || r == '=' {
+ errs = append(errs, relaxedEnvVarNameFmtErrMsg)
+ break
+ }
+ }
+
+ return errs
+}
+
const configMapKeyFmt = `[-._a-zA-Z0-9]+`
const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'"
@@ -468,13 +448,6 @@ func EmptyError() string {
return "must be non-empty"
}
-func prefixEach(msgs []string, prefix string) []string {
- for i := range msgs {
- msgs[i] = prefix + msgs[i]
- }
- return msgs
-}
-
// InclusiveRangeError returns a string explanation of a numeric "must be
// between" validation failure.
func InclusiveRangeError(lo, hi int) string {
@@ -493,18 +466,3 @@ func hasChDirPrefix(value string) []string {
}
return errs
}
-
-// IsValidSocketAddr checks that string represents a valid socket address
-// as defined in RFC 789. (e.g 0.0.0.0:10254 or [::]:10254))
-func IsValidSocketAddr(value string) []string {
- var errs []string
- ip, port, err := net.SplitHostPort(value)
- if err != nil {
- errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)")
- return errs
- }
- portInt, _ := strconv.Atoi(port)
- errs = append(errs, IsValidPortNum(portInt)...)
- errs = append(errs, IsValidIP(ip)...)
- return errs
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/doc.go b/vendor/k8s.io/apimachinery/pkg/util/version/doc.go
index 5b2b22b6d0..da88813da2 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/version/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/version/doc.go
@@ -15,4 +15,4 @@ limitations under the License.
*/
// Package version provides utilities for version number comparisons
-package version // import "k8s.io/apimachinery/pkg/util/version"
+package version
diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go
index 2292ba1376..72c0769e6c 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/version/version.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go
@@ -23,6 +23,8 @@ import (
"regexp"
"strconv"
"strings"
+
+ apimachineryversion "k8s.io/apimachinery/pkg/version"
)
// Version is an opaque representation of a version number
@@ -145,6 +147,43 @@ func MustParseGeneric(str string) *Version {
return v
}
+// Parse tries to do ParseSemantic first to keep more information.
+// If ParseSemantic fails, it would just do ParseGeneric.
+func Parse(str string) (*Version, error) {
+ v, err := parse(str, true)
+ if err != nil {
+ return parse(str, false)
+ }
+ return v, err
+}
+
+// MustParse is like Parse except that it panics on error
+func MustParse(str string) *Version {
+ v, err := Parse(str)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// ParseMajorMinor parses a "generic" version string and returns a version with the major and minor version.
+func ParseMajorMinor(str string) (*Version, error) {
+ v, err := ParseGeneric(str)
+ if err != nil {
+ return nil, err
+ }
+ return MajorMinor(v.Major(), v.Minor()), nil
+}
+
+// MustParseMajorMinor is like ParseMajorMinor except that it panics on error
+func MustParseMajorMinor(str string) *Version {
+ v, err := ParseMajorMinor(str)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
// ParseSemantic parses a version string that exactly obeys the syntax and semantics of
// the "Semantic Versioning" specification (http://semver.org/) (although it ignores
// leading and trailing whitespace, and allows the version to be preceded by "v"). For
@@ -215,6 +254,32 @@ func (v *Version) WithMinor(minor uint) *Version {
return &result
}
+// OffsetMinor returns the version with the minor adjusted by offset, with the same major and no patch.
+// If -offset >= current minor, the minor would be 0.
+func (v *Version) OffsetMinor(offset int) *Version {
+ var minor uint
+ if offset >= 0 {
+ minor = v.Minor() + uint(offset)
+ } else {
+ diff := uint(-offset)
+ if diff < v.Minor() {
+ minor = v.Minor() - diff
+ }
+ }
+ return MajorMinor(v.Major(), minor)
+}
+
+// SubtractMinor returns the version diff minor versions back, with the same major and no patch.
+// If diff >= current minor, the minor would be 0.
+func (v *Version) SubtractMinor(diff uint) *Version {
+ return v.OffsetMinor(-int(diff))
+}
+
+// AddMinor returns the version diff minor versions forward, with the same major and no patch.
+func (v *Version) AddMinor(diff uint) *Version {
+ return v.OffsetMinor(int(diff))
+}
+
// WithPatch returns copy of the version object with requested patch number
func (v *Version) WithPatch(patch uint) *Version {
result := *v
@@ -224,6 +289,9 @@ func (v *Version) WithPatch(patch uint) *Version {
// WithPreRelease returns copy of the version object with requested prerelease
func (v *Version) WithPreRelease(preRelease string) *Version {
+ if len(preRelease) == 0 {
+ return v
+ }
result := *v
result.components = []uint{v.Major(), v.Minor(), v.Patch()}
result.preRelease = preRelease
@@ -345,6 +413,17 @@ func onlyZeros(array []uint) bool {
return true
}
+// EqualTo tests if a version is equal to a given version.
+func (v *Version) EqualTo(other *Version) bool {
+ if v == nil {
+ return other == nil
+ }
+ if other == nil {
+ return false
+ }
+ return v.compareInternal(other) == 0
+}
+
// AtLeast tests if a version is at least equal to a given minimum version. If both
// Versions are Semantic Versions, this will use the Semantic Version comparison
// algorithm. Otherwise, it will compare only the numeric components, with non-present
@@ -360,6 +439,11 @@ func (v *Version) LessThan(other *Version) bool {
return v.compareInternal(other) == -1
}
+// GreaterThan tests if a version is greater than a given version.
+func (v *Version) GreaterThan(other *Version) bool {
+ return v.compareInternal(other) == 1
+}
+
// Compare compares v against a version string (which will be parsed as either Semantic
// or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if
// it is greater than other, or 0 if they are equal.
@@ -370,3 +454,31 @@ func (v *Version) Compare(other string) (int, error) {
}
return v.compareInternal(ov), nil
}
+
+// WithInfo returns copy of the version object.
+// Deprecated: The Info field has been removed from the Version struct. This method no longer modifies the Version object.
+func (v *Version) WithInfo(info apimachineryversion.Info) *Version {
+ result := *v
+ return &result
+}
+
+// Info returns the version information of a component.
+// Deprecated: Use Info() from effective version instead.
+func (v *Version) Info() *apimachineryversion.Info {
+ if v == nil {
+ return nil
+ }
+ // in case info is empty, or the major and minor in info is different from the actual major and minor
+ return &apimachineryversion.Info{
+ Major: Itoa(v.Major()),
+ Minor: Itoa(v.Minor()),
+ GitVersion: v.String(),
+ }
+}
+
+func Itoa(i uint) string {
+ if i == 0 {
+ return ""
+ }
+ return strconv.Itoa(int(i))
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go
index 4187619256..177be09a95 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go
@@ -157,6 +157,8 @@ func (b Backoff) DelayWithReset(c clock.Clock, resetInterval time.Duration) Dela
// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
// with sliding = true (which means the timer for period starts after the f
// completes).
+//
+// Contextual logging: UntilWithContext should be used instead of Until in code which supports contextual logging.
func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, true, stopCh)
}
@@ -176,6 +178,8 @@ func UntilWithContext(ctx context.Context, f func(context.Context), period time.
// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
// factor, with sliding = false (meaning the timer for period starts at the same
// time as the function starts).
+//
+// Contextual logging: NonSlidingUntilWithContext should be used instead of NonSlidingUntil in code which supports contextual logging.
func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, false, stopCh)
}
@@ -200,19 +204,44 @@ func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), pe
//
// Close stopCh to stop. f may not be invoked if stop channel is already
// closed. Pass NeverStop to if you don't want it stop.
+//
+// Contextual logging: JitterUntilWithContext should be used instead of JitterUntil in code which supports contextual logging.
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh)
}
+// JitterUntilWithContext loops until context is done, running f every period.
+//
+// If jitterFactor is positive, the period is jittered before every run of f.
+// If jitterFactor is not positive, the period is unchanged and not jittered.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+//
+// Cancel context to stop. f may not be invoked if context is already done.
+func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) {
+ BackoffUntilWithContext(ctx, f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding)
+}
+
// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
+//
+// Contextual logging: BackoffUntilWithContext should be used instead of BackoffUntil in code which supports contextual logging.
func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) {
+ BackoffUntilWithContext(ContextForChannel(stopCh), func(context.Context) { f() }, backoff, sliding)
+}
+
+// BackoffUntilWithContext loops until context is done, run f every duration given by BackoffManager.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+func BackoffUntilWithContext(ctx context.Context, f func(ctx context.Context), backoff BackoffManager, sliding bool) {
var t clock.Timer
for {
select {
- case <-stopCh:
+ case <-ctx.Done():
return
default:
}
@@ -222,8 +251,8 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan
}
func() {
- defer runtime.HandleCrash()
- f()
+ defer runtime.HandleCrashWithContext(ctx)
+ f(ctx)
}()
if sliding {
@@ -236,7 +265,7 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan
// In order to mitigate we re-check stopCh at the beginning
// of every loop to prevent extra executions of f().
select {
- case <-stopCh:
+ case <-ctx.Done():
if !t.Stop() {
<-t.C()
}
@@ -246,19 +275,6 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan
}
}
-// JitterUntilWithContext loops until context is done, running f every period.
-//
-// If jitterFactor is positive, the period is jittered before every run of f.
-// If jitterFactor is not positive, the period is unchanged and not jittered.
-//
-// If sliding is true, the period is computed after f runs. If it is false then
-// period includes the runtime for f.
-//
-// Cancel context to stop. f may not be invoked if context is already expired.
-func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) {
- JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done())
-}
-
// backoffManager provides simple backoff behavior in a threadsafe manner to a caller.
type backoffManager struct {
backoff Backoff
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go
index 3f0c968ec9..ff89dc170e 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
// Package wait provides tools for polling or listening for changes
// to a condition.
-package wait // import "k8s.io/apimachinery/pkg/util/wait"
+package wait
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go
index 107bfc132f..9f9b929ffa 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go
@@ -49,7 +49,7 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding
// if we haven't requested immediate execution, delay once
if immediate {
if ok, err := func() (bool, error) {
- defer runtime.HandleCrash()
+ defer runtime.HandleCrashWithContext(ctx)
return condition(ctx)
}(); err != nil || ok {
return err
@@ -83,7 +83,7 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding
t.Next()
}
if ok, err := func() (bool, error) {
- defer runtime.HandleCrash()
+ defer runtime.HandleCrashWithContext(ctx)
return condition(ctx)
}(); err != nil || ok {
return err
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
index 6805e8cf94..7379a8d5ac 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
@@ -80,6 +80,10 @@ func Forever(f func(), period time.Duration) {
Until(f, period, NeverStop)
}
+// jitterRand is a dedicated random source for jitter calculations.
+// It defaults to rand.Float64, but is a package variable so it can be overridden to make unit tests deterministic.
+var jitterRand = rand.Float64
+
// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
@@ -89,7 +93,7 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration {
if maxFactor <= 0.0 {
maxFactor = 1.0
}
- wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
+ wait := duration + time.Duration(jitterRand()*maxFactor*float64(duration))
return wait
}
@@ -141,6 +145,7 @@ func (c channelContext) Value(key any) any { return nil }
//
// Deprecated: Will be removed when the legacy polling methods are removed.
func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
+ //nolint:logcheck // Already deprecated.
defer runtime.HandleCrash()
return condition()
}
@@ -150,7 +155,7 @@ func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
//
// Deprecated: Will be removed when the legacy polling methods are removed.
func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) {
- defer runtime.HandleCrash()
+ defer runtime.HandleCrashWithContext(ctx)
return condition(ctx)
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
index 9837b3df28..66bf31eea1 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
@@ -20,10 +20,12 @@ import (
"bufio"
"bytes"
"encoding/json"
+ "errors"
"fmt"
"io"
"strings"
"unicode"
+ "unicode/utf8"
jsonutil "k8s.io/apimachinery/pkg/util/json"
@@ -92,7 +94,7 @@ func UnmarshalStrict(data []byte, v interface{}) error {
// YAML decoding path is not used (so that error messages are
// JSON specific).
func ToJSON(data []byte) ([]byte, error) {
- if hasJSONPrefix(data) {
+ if IsJSONBuffer(data) {
return data, nil
}
return yaml.YAMLToJSON(data)
@@ -102,7 +104,8 @@ func ToJSON(data []byte) ([]byte, error) {
// separating individual documents. It first converts the YAML
// body to JSON, then unmarshals the JSON.
type YAMLToJSONDecoder struct {
- reader Reader
+ reader Reader
+ inputOffset int
}
// NewYAMLToJSONDecoder decodes YAML documents from the provided
@@ -121,7 +124,7 @@ func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder {
// yaml.Unmarshal.
func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
bytes, err := d.reader.Read()
- if err != nil && err != io.EOF {
+ if err != nil && err != io.EOF { //nolint:errorlint
return err
}
@@ -131,9 +134,14 @@ func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
return YAMLSyntaxError{err}
}
}
+ d.inputOffset += len(bytes)
return err
}
+func (d *YAMLToJSONDecoder) InputOffset() int {
+ return d.inputOffset
+}
+
// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if
// the data is not sufficient.
type YAMLDecoder struct {
@@ -229,18 +237,22 @@ func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err
return 0, nil, nil
}
-// decoder is a convenience interface for Decode.
-type decoder interface {
- Decode(into interface{}) error
-}
-
-// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or
-// YAML documents by sniffing for a leading { character.
+// YAMLOrJSONDecoder attempts to decode a stream of JSON or YAML documents.
+// While JSON is YAML, the way Go's JSON decode defines a multi-document stream
+// is a series of JSON objects (e.g. {}{}), but YAML defines a multi-document
+// stream as a series of documents separated by "---".
+//
+// This decoder will attempt to decode the stream as JSON first, and if that
+// fails, it will switch to YAML. Once it determines the stream is JSON (by
+// finding a non-YAML-delimited series of objects), it will not switch to YAML.
+// Once it switches to YAML it will not switch back to JSON.
type YAMLOrJSONDecoder struct {
- r io.Reader
- bufferSize int
-
- decoder decoder
+ json *json.Decoder
+ jsonConsumed int64 // of the stream total, how much was JSON?
+ yaml *YAMLToJSONDecoder
+ yamlConsumed int64 // of the stream total, how much was YAML?
+ stream *StreamReader
+ count int // how many objects have been decoded
}
type JSONSyntaxError struct {
@@ -265,31 +277,113 @@ func (e YAMLSyntaxError) Error() string {
// how far into the stream the decoder will look to figure out whether this
// is a JSON stream (has whitespace followed by an open brace).
func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder {
- return &YAMLOrJSONDecoder{
- r: r,
- bufferSize: bufferSize,
+ d := &YAMLOrJSONDecoder{}
+
+ reader, _, mightBeJSON := GuessJSONStream(r, bufferSize)
+ d.stream = reader
+ if mightBeJSON {
+ d.json = json.NewDecoder(reader)
+ } else {
+ d.yaml = NewYAMLToJSONDecoder(reader)
}
+ return d
}
// Decode unmarshals the next object from the underlying stream into the
// provide object, or returns an error.
func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
- if d.decoder == nil {
- buffer, _, isJSON := GuessJSONStream(d.r, d.bufferSize)
- if isJSON {
- d.decoder = json.NewDecoder(buffer)
+ // Because we don't know if this is a JSON or YAML stream, a failure from
+ // both decoders is ambiguous. When in doubt, it will return the error from
+ // the JSON decoder. Unfortunately, this means that if the first document
+ // is invalid YAML, the error won't be awesome.
+ // TODO: the errors from YAML are not great, we could improve them a lot.
+ var firstErr error
+ if d.json != nil {
+ err := d.json.Decode(into)
+ if err == nil {
+ d.count++
+ consumed := d.json.InputOffset() - d.jsonConsumed
+ d.stream.Consume(int(consumed))
+ d.jsonConsumed += consumed
+ return nil
+ }
+ if err == io.EOF { //nolint:errorlint
+ return err
+ }
+ var syntax *json.SyntaxError
+ if ok := errors.As(err, &syntax); ok {
+ firstErr = JSONSyntaxError{
+ Offset: syntax.Offset,
+ Err: syntax,
+ }
} else {
- d.decoder = NewYAMLToJSONDecoder(buffer)
+ firstErr = err
+ }
+ if d.count > 1 {
+ // If we found 0 or 1 JSON object(s), this stream is still
+ // ambiguous. But if we found more than 1 JSON object, then this
+ // is an unambiguous JSON stream, and we should not switch to YAML.
+ return err
+ }
+ // If JSON decoding hits the end of one object and then fails on the
+ // next, it leaves any leading whitespace in the buffer, which can
+ // confuse the YAML decoder. We just eat any whitespace we find, up to
+ // and including the first newline.
+ d.stream.Rewind()
+ if err := d.consumeWhitespace(); err == nil {
+ d.yaml = NewYAMLToJSONDecoder(d.stream)
+ }
+ d.json = nil
+ }
+ if d.yaml != nil {
+ err := d.yaml.Decode(into)
+ if err == nil {
+ d.count++
+ consumed := int64(d.yaml.InputOffset()) - d.yamlConsumed
+ d.stream.Consume(int(consumed))
+ d.yamlConsumed += consumed
+ return nil
+ }
+ if err == io.EOF { //nolint:errorlint
+ return err
+ }
+ if firstErr == nil {
+ firstErr = err
}
}
- err := d.decoder.Decode(into)
- if syntax, ok := err.(*json.SyntaxError); ok {
- return JSONSyntaxError{
- Offset: syntax.Offset,
- Err: syntax,
+ if firstErr != nil {
+ return firstErr
+ }
+ return fmt.Errorf("decoding failed as both JSON and YAML")
+}
+
+func (d *YAMLOrJSONDecoder) consumeWhitespace() error {
+ consumed := 0
+ for {
+ buf, err := d.stream.ReadN(4)
+ if err != nil && err == io.EOF { //nolint:errorlint
+ return err
+ }
+ r, sz := utf8.DecodeRune(buf)
+ if r == utf8.RuneError || sz == 0 {
+ return fmt.Errorf("invalid utf8 rune")
+ }
+ d.stream.RewindN(len(buf) - sz)
+ if !unicode.IsSpace(r) {
+ d.stream.RewindN(sz)
+ d.stream.Consume(consumed)
+ return nil
+ }
+ consumed += sz
+ if r == '\n' {
+ d.stream.Consume(consumed)
+ return nil
+ }
+ if err == io.EOF { //nolint:errorlint
+ break
}
}
- return err
+ return io.EOF
}
type Reader interface {
@@ -311,7 +405,7 @@ func (r *YAMLReader) Read() ([]byte, error) {
var buffer bytes.Buffer
for {
line, err := r.reader.Read()
- if err != nil && err != io.EOF {
+ if err != nil && err != io.EOF { //nolint:errorlint
return nil, err
}
@@ -329,11 +423,11 @@ func (r *YAMLReader) Read() ([]byte, error) {
if buffer.Len() != 0 {
return buffer.Bytes(), nil
}
- if err == io.EOF {
+ if err == io.EOF { //nolint:errorlint
return nil, err
}
}
- if err == io.EOF {
+ if err == io.EOF { //nolint:errorlint
if buffer.Len() != 0 {
// If we're at EOF, we have a final, non-terminated line. Return it.
return buffer.Bytes(), nil
@@ -369,26 +463,20 @@ func (r *LineReader) Read() ([]byte, error) {
// GuessJSONStream scans the provided reader up to size, looking
// for an open brace indicating this is JSON. It will return the
// bufio.Reader it creates for the consumer.
-func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) {
- buffer := bufio.NewReaderSize(r, size)
+func GuessJSONStream(r io.Reader, size int) (*StreamReader, []byte, bool) {
+ buffer := NewStreamReader(r, size)
b, _ := buffer.Peek(size)
- return buffer, b, hasJSONPrefix(b)
+ return buffer, b, IsJSONBuffer(b)
}
// IsJSONBuffer scans the provided buffer, looking
// for an open brace indicating this is JSON.
func IsJSONBuffer(buf []byte) bool {
- return hasJSONPrefix(buf)
+ return hasPrefix(buf, jsonPrefix)
}
var jsonPrefix = []byte("{")
-// hasJSONPrefix returns true if the provided buffer appears to start with
-// a JSON open brace.
-func hasJSONPrefix(buf []byte) bool {
- return hasPrefix(buf, jsonPrefix)
-}
-
// Return true if the first non-whitespace bytes in buf is
// prefix.
func hasPrefix(buf []byte, prefix []byte) bool {
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go
new file mode 100644
index 0000000000..d06991057f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package yaml
+
+import "io"
+
+// StreamReader is a reader designed for consuming streams of variable-length
+// messages. It buffers data until it is explicitly consumed, and can be
+// rewound to re-read previous data.
+type StreamReader struct {
+ r io.Reader
+ buf []byte
+ head int // current read offset into buf
+ ttlConsumed int // number of bytes which have been consumed
+}
+
+// NewStreamReader creates a new StreamReader wrapping the provided
+// io.Reader.
+func NewStreamReader(r io.Reader, size int) *StreamReader {
+ if size == 0 {
+ size = 4096
+ }
+ return &StreamReader{
+ r: r,
+ buf: make([]byte, 0, size), // Start with a reasonable capacity
+ }
+}
+
+// Read implements io.Reader. It first returns any buffered data after the
+// current offset, and if that's exhausted, reads from the underlying reader
+// and buffers the data. The returned data is not considered consumed until the
+// Consume method is called.
+func (r *StreamReader) Read(p []byte) (n int, err error) {
+ // If we have buffered data, return it
+ if r.head < len(r.buf) {
+ n = copy(p, r.buf[r.head:])
+ r.head += n
+ return n, nil
+ }
+
+ // If we've already hit EOF, return it
+ if r.r == nil {
+ return 0, io.EOF
+ }
+
+ // Read from the underlying reader
+ n, err = r.r.Read(p)
+ if n > 0 {
+ r.buf = append(r.buf, p[:n]...)
+ r.head += n
+ }
+ if err == nil {
+ return n, nil
+ }
+ if err == io.EOF {
+ // Store that we've hit EOF by setting r to nil
+ r.r = nil
+ }
+ return n, err
+}
+
+// ReadN reads exactly n bytes from the reader, blocking until all bytes are
+// read or an error occurs. If an error occurs, the number of bytes read is
+// returned along with the error. If EOF is hit before n bytes are read, this
+// will return the bytes read so far, along with io.EOF. The returned data is
+// not considered consumed until the Consume method is called.
+func (r *StreamReader) ReadN(want int) ([]byte, error) {
+ ret := make([]byte, want)
+ off := 0
+ for off < want {
+ n, err := r.Read(ret[off:])
+ if err != nil {
+ return ret[:off+n], err
+ }
+ off += n
+ }
+ return ret, nil
+}
+
+// Peek returns the next n bytes without advancing the reader. The returned
+// bytes are valid until the next call to Consume.
+func (r *StreamReader) Peek(n int) ([]byte, error) {
+ buf, err := r.ReadN(n)
+ r.RewindN(len(buf))
+ if err != nil {
+ return buf, err
+ }
+ return buf, nil
+}
+
+// Rewind resets the reader to the beginning of the buffered data.
+func (r *StreamReader) Rewind() {
+ r.head = 0
+}
+
+// RewindN rewinds the reader by n bytes. If n is greater than the current
+// buffer, the reader is rewound to the beginning of the buffer.
+func (r *StreamReader) RewindN(n int) {
+ r.head -= min(n, r.head)
+}
+
+// Consume discards up to n bytes of previously read data from the beginning of
+// the buffer. Once consumed, that data is no longer available for rewinding.
+// If n is greater than the current buffer, the buffer is cleared. Consume
+// never consumes data from the underlying reader.
+func (r *StreamReader) Consume(n int) {
+ n = min(n, len(r.buf))
+ r.buf = r.buf[n:]
+ r.head -= n
+ r.ttlConsumed += n
+}
+
+// Consumed returns the number of bytes consumed from the input reader.
+func (r *StreamReader) Consumed() int {
+ return r.ttlConsumed
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go
index 29574fd6d5..70e3f76b23 100644
--- a/vendor/k8s.io/apimachinery/pkg/version/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go
@@ -15,6 +15,8 @@ limitations under the License.
*/
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.version
+//
-// Package version supplies the type for version information collected at build time.
-package version // import "k8s.io/apimachinery/pkg/version"
+// Package version supplies the type for version information.
+package version
diff --git a/vendor/k8s.io/apimachinery/pkg/version/types.go b/vendor/k8s.io/apimachinery/pkg/version/types.go
index 72727b503b..6a18f9e91d 100644
--- a/vendor/k8s.io/apimachinery/pkg/version/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/version/types.go
@@ -20,15 +20,25 @@ package version
// TODO: Add []string of api versions supported? It's still unclear
// how we'll want to distribute that information.
type Info struct {
- Major string `json:"major"`
- Minor string `json:"minor"`
- GitVersion string `json:"gitVersion"`
- GitCommit string `json:"gitCommit"`
- GitTreeState string `json:"gitTreeState"`
- BuildDate string `json:"buildDate"`
- GoVersion string `json:"goVersion"`
- Compiler string `json:"compiler"`
- Platform string `json:"platform"`
+ // Major is the major version of the binary version
+ Major string `json:"major"`
+ // Minor is the minor version of the binary version
+ Minor string `json:"minor"`
+ // EmulationMajor is the major version of the emulation version
+ EmulationMajor string `json:"emulationMajor,omitempty"`
+ // EmulationMinor is the minor version of the emulation version
+ EmulationMinor string `json:"emulationMinor,omitempty"`
+ // MinCompatibilityMajor is the major version of the minimum compatibility version
+ MinCompatibilityMajor string `json:"minCompatibilityMajor,omitempty"`
+ // MinCompatibilityMinor is the minor version of the minimum compatibility version
+ MinCompatibilityMinor string `json:"minCompatibilityMinor,omitempty"`
+ GitVersion string `json:"gitVersion"`
+ GitCommit string `json:"gitCommit"`
+ GitTreeState string `json:"gitTreeState"`
+ BuildDate string `json:"buildDate"`
+ GoVersion string `json:"goVersion"`
+ Compiler string `json:"compiler"`
+ Platform string `json:"platform"`
}
// String returns info as a human-friendly version string.
diff --git a/vendor/k8s.io/apimachinery/pkg/version/zz_generated.model_name.go b/vendor/k8s.io/apimachinery/pkg/version/zz_generated.model_name.go
new file mode 100644
index 0000000000..e5a6d395ad
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/version/zz_generated.model_name.go
@@ -0,0 +1,27 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package version
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Info) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.version.Info"
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/doc.go b/vendor/k8s.io/apimachinery/pkg/watch/doc.go
index 7e6bf3fb95..5fde5e7427 100644
--- a/vendor/k8s.io/apimachinery/pkg/watch/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/watch/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
// Package watch contains a generic watchable interface, and a fake for
// testing code that uses the watch interface.
-package watch // import "k8s.io/apimachinery/pkg/watch"
+package watch
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
index 42dcac2b9e..b422ca9f55 100644
--- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
+++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
@@ -51,6 +51,7 @@ type Reporter interface {
// StreamWatcher turns any stream for which you can write a Decoder interface
// into a watch.Interface.
type StreamWatcher struct {
+ logger klog.Logger
sync.Mutex
source Decoder
reporter Reporter
@@ -59,8 +60,16 @@ type StreamWatcher struct {
}
// NewStreamWatcher creates a StreamWatcher from the given decoder.
+//
+// Contextual logging: NewStreamWatcherWithLogger should be used instead of NewStreamWatcher in code which supports contextual logging.
func NewStreamWatcher(d Decoder, r Reporter) *StreamWatcher {
+ return NewStreamWatcherWithLogger(klog.Background(), d, r)
+}
+
+// NewStreamWatcherWithLogger creates a StreamWatcher from the given decoder and logger.
+func NewStreamWatcherWithLogger(logger klog.Logger, d Decoder, r Reporter) *StreamWatcher {
sw := &StreamWatcher{
+ logger: logger,
source: d,
reporter: r,
// It's easy for a consumer to add buffering via an extra
@@ -98,7 +107,7 @@ func (sw *StreamWatcher) Stop() {
// receive reads result from the decoder in a loop and sends down the result channel.
func (sw *StreamWatcher) receive() {
- defer utilruntime.HandleCrash()
+ defer utilruntime.HandleCrashWithLogger(sw.logger)
defer close(sw.result)
defer sw.Stop()
for {
@@ -108,10 +117,10 @@ func (sw *StreamWatcher) receive() {
case io.EOF:
// watch closed normally
case io.ErrUnexpectedEOF:
- klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
+ sw.logger.V(1).Info("Unexpected EOF during watch stream event decoding", "err", err)
default:
if net.IsProbableEOF(err) || net.IsTimeout(err) {
- klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err)
+ sw.logger.V(5).Info("Unable to decode an event from the watch stream", "err", err)
} else {
select {
case <-sw.done:
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
index b6c7bbfa8f..251459834b 100644
--- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go
+++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
@@ -23,17 +23,30 @@ import (
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/utils/ptr"
)
// Interface can be implemented by anything that knows how to watch and report changes.
type Interface interface {
- // Stop stops watching. Will close the channel returned by ResultChan(). Releases
- // any resources used by the watch.
+ // Stop tells the producer that the consumer is done watching, so the
+ // producer should stop sending events and close the result channel. The
+ // consumer should keep watching for events until the result channel is
+ // closed.
+ //
+ // Because some implementations may create channels when constructed, Stop
+ // must always be called, even if the consumer has not yet called
+ // ResultChan().
+ //
+ // Only the consumer should call Stop(), not the producer. If the producer
+ // errors and needs to stop the watch prematurely, it should instead send
+ // an error event and close the result channel.
Stop()
- // ResultChan returns a chan which will receive all the events. If an error occurs
- // or Stop() is called, the implementation will close this channel and
- // release any resources used by the watch.
+ // ResultChan returns a channel which will receive events from the event
+ // producer. If an error occurs or Stop() is called, the producer must
+ // close this channel and release any resources used by the watch.
+ // Closing the result channel tells the consumer that no more events will be
+ // sent.
ResultChan() <-chan Event
}
@@ -91,29 +104,42 @@ func (w emptyWatch) ResultChan() <-chan Event {
// FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe.
type FakeWatcher struct {
+ logger klog.Logger
result chan Event
stopped bool
sync.Mutex
}
+var _ Interface = &FakeWatcher{}
+
+// Contextual logging: NewFakeWithOptions and a logger in the FakeOptions should be used instead in code which supports contextual logging.
func NewFake() *FakeWatcher {
- return &FakeWatcher{
- result: make(chan Event),
- }
+ return NewFakeWithOptions(FakeOptions{})
}
+// Contextual logging: NewFakeWithOptions and a logger in the FakeOptions should be used instead in code which supports contextual logging.
func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher {
+ return NewFakeWithOptions(FakeOptions{ChannelSize: size})
+}
+
+func NewFakeWithOptions(options FakeOptions) *FakeWatcher {
return &FakeWatcher{
- result: make(chan Event, size),
+ logger: ptr.Deref(options.Logger, klog.Background()),
+ result: make(chan Event, options.ChannelSize),
}
}
+type FakeOptions struct {
+ Logger *klog.Logger
+ ChannelSize int
+}
+
// Stop implements Interface.Stop().
func (f *FakeWatcher) Stop() {
f.Lock()
defer f.Unlock()
if !f.stopped {
- klog.V(4).Infof("Stopping fake watcher.")
+ f.logger.V(4).Info("Stopping fake watcher")
close(f.result)
f.stopped = true
}
@@ -164,13 +190,22 @@ func (f *FakeWatcher) Action(action EventType, obj runtime.Object) {
// RaceFreeFakeWatcher lets you test anything that consumes a watch.Interface; threadsafe.
type RaceFreeFakeWatcher struct {
+ logger klog.Logger
result chan Event
Stopped bool
sync.Mutex
}
+var _ Interface = &RaceFreeFakeWatcher{}
+
+// Contextual logging: NewRaceFreeFakeWithLogger should be used instead of NewRaceFreeFake in code which supports contextual logging.
func NewRaceFreeFake() *RaceFreeFakeWatcher {
+ return NewRaceFreeFakeWithLogger(klog.Background())
+}
+
+func NewRaceFreeFakeWithLogger(logger klog.Logger) *RaceFreeFakeWatcher {
return &RaceFreeFakeWatcher{
+ logger: logger,
result: make(chan Event, DefaultChanSize),
}
}
@@ -180,7 +215,7 @@ func (f *RaceFreeFakeWatcher) Stop() {
f.Lock()
defer f.Unlock()
if !f.Stopped {
- klog.V(4).Infof("Stopping fake watcher.")
+ f.logger.V(4).Info("Stopping fake watcher")
close(f.result)
f.Stopped = true
}
@@ -322,3 +357,21 @@ func (pw *ProxyWatcher) ResultChan() <-chan Event {
func (pw *ProxyWatcher) StopChan() <-chan struct{} {
return pw.stopCh
}
+
+// MockWatcher implements watch.Interface with mockable functions.
+type MockWatcher struct {
+ StopFunc func()
+ ResultChanFunc func() <-chan Event
+}
+
+var _ Interface = &MockWatcher{}
+
+// Stop calls StopFunc
+func (mw MockWatcher) Stop() {
+ mw.StopFunc()
+}
+
+// ResultChan calls ResultChanFunc
+func (mw MockWatcher) ResultChan() <-chan Event {
+ return mw.ResultChanFunc()
+}
diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS
index a2fe8f351b..7500475a64 100644
--- a/vendor/k8s.io/klog/v2/OWNERS
+++ b/vendor/k8s.io/klog/v2/OWNERS
@@ -1,14 +1,16 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- harshanarayana
+ - mengjiao-liu
- pohly
approvers:
- dims
+ - pohly
- thockin
- - serathius
emeritus_approvers:
- brancz
- justinsb
- lavalamp
- piosz
+ - serathius
- tallclair
diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/k8s.io/klog/v2/contextual_slog.go
similarity index 59%
rename from vendor/sigs.k8s.io/yaml/yaml_go110.go
rename to vendor/k8s.io/klog/v2/contextual_slog.go
index 94abc1719d..d3b562521d 100644
--- a/vendor/sigs.k8s.io/yaml/yaml_go110.go
+++ b/vendor/k8s.io/klog/v2/contextual_slog.go
@@ -1,7 +1,5 @@
-// This file contains changes that are only compatible with go 1.10 and onwards.
-
-//go:build go1.10
-// +build go1.10
+//go:build go1.21
+// +build go1.21
/*
Copyright 2021 The Kubernetes Authors.
@@ -19,13 +17,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package yaml
+package klog
+
+import (
+ "log/slog"
-import "encoding/json"
+ "github.com/go-logr/logr"
+)
-// DisallowUnknownFields configures the JSON decoder to error out if unknown
-// fields come along, instead of dropping them by default.
-func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
- d.DisallowUnknownFields()
- return d
+// SetSlogLogger reconfigures klog to log through the slog logger. The logger must not be nil.
+func SetSlogLogger(logger *slog.Logger) {
+ SetLoggerWithOptions(logr.FromSlogHandler(logger.Handler()), ContextualLogger(true))
}
diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go
index 72502db3ae..47ec9466a6 100644
--- a/vendor/k8s.io/klog/v2/klog.go
+++ b/vendor/k8s.io/klog/v2/klog.go
@@ -14,9 +14,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
-// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
-// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
+// Package klog contains the following functionality:
+//
+// - output routing as defined via command line flags ([InitFlags])
+// - log formatting as text, either with a single, unstructured string ([Info], [Infof], etc.)
+// or as a structured log entry with message and key/value pairs ([InfoS], etc.)
+// - management of a go-logr [Logger] ([SetLogger], [Background], [TODO])
+// - helper functions for logging values ([Format]) and managing the state of klog ([CaptureState], [State.Restore])
+// - wrappers for [logr] APIs for contextual logging where the wrappers can
+// be turned into no-ops ([EnableContextualLogging], [NewContext], [FromContext],
+// [LoggerWithValues], [LoggerWithName]); if the ability to turn off
+// contextual logging is not needed, then go-logr can also be used directly
+// - type aliases for go-logr types to simplify imports in code which uses both (e.g. [Logger])
+// - [k8s.io/klog/v2/textlogger]: a logger which uses the same formatting as klog log with
+// simpler output routing; beware that it comes with its own command line flags
+// and does not use the ones from klog
+// - [k8s.io/klog/v2/ktesting]: per-test output in Go unit tests
+// - [k8s.io/klog/v2/klogr]: a deprecated, standalone [logr.Logger] on top of the main klog package;
+// use [Background] instead if klog output routing is needed, [k8s.io/klog/v2/textlogger] if not
+// - [k8s.io/klog/v2/examples]: demos of this functionality
+// - [k8s.io/klog/v2/test]: reusable tests for [logr.Logger] implementations
//
// Basic examples:
//
@@ -387,13 +404,6 @@ func (t *traceLocation) Set(value string) error {
return nil
}
-// flushSyncWriter is the interface satisfied by logging destinations.
-type flushSyncWriter interface {
- Flush() error
- Sync() error
- io.Writer
-}
-
var logging loggingT
var commandLine flag.FlagSet
@@ -469,7 +479,7 @@ type settings struct {
// Access to all of the following fields must be protected via a mutex.
// file holds writer for each of the log types.
- file [severity.NumSeverity]flushSyncWriter
+ file [severity.NumSeverity]io.Writer
// flushInterval is the interval for periodic flushing. If zero,
// the global default will be used.
flushInterval time.Duration
@@ -814,32 +824,12 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string,
buffer.PutBuffer(b)
}
-// redirectBuffer is used to set an alternate destination for the logs
-type redirectBuffer struct {
- w io.Writer
-}
-
-func (rb *redirectBuffer) Sync() error {
- return nil
-}
-
-func (rb *redirectBuffer) Flush() error {
- return nil
-}
-
-func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
- return rb.w.Write(bytes)
-}
-
// SetOutput sets the output destination for all severities
func SetOutput(w io.Writer) {
logging.mu.Lock()
defer logging.mu.Unlock()
for s := severity.FatalLog; s >= severity.InfoLog; s-- {
- rb := &redirectBuffer{
- w: w,
- }
- logging.file[s] = rb
+ logging.file[s] = w
}
}
@@ -851,10 +841,7 @@ func SetOutputBySeverity(name string, w io.Writer) {
if !ok {
panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
}
- rb := &redirectBuffer{
- w: w,
- }
- logging.file[sev] = rb
+ logging.file[sev] = w
}
// LogToStderr sets whether to log exclusively to stderr, bypassing outputs
@@ -994,7 +981,8 @@ func (l *loggingT) exit(err error) {
logExitFunc(err)
return
}
- l.flushAll()
+ needToSync := l.flushAll()
+ l.syncAll(needToSync)
OsExit(2)
}
@@ -1011,10 +999,6 @@ type syncBuffer struct {
maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
}
-func (sb *syncBuffer) Sync() error {
- return sb.file.Sync()
-}
-
// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
func CalculateMaxSize() uint64 {
if logging.logFile != "" {
@@ -1206,24 +1190,45 @@ func StartFlushDaemon(interval time.Duration) {
// lockAndFlushAll is like flushAll but locks l.mu first.
func (l *loggingT) lockAndFlushAll() {
l.mu.Lock()
- l.flushAll()
+ needToSync := l.flushAll()
l.mu.Unlock()
+ // Some environments are slow when syncing and holding the lock might cause contention.
+ l.syncAll(needToSync)
}
-// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// flushAll flushes all the logs
// l.mu is held.
-func (l *loggingT) flushAll() {
+//
+// The result is the number of files which need to be synced and the pointers to them.
+func (l *loggingT) flushAll() fileArray {
+ var needToSync fileArray
+
// Flush from fatal down, in case there's trouble flushing.
for s := severity.FatalLog; s >= severity.InfoLog; s-- {
file := l.file[s]
- if file != nil {
- _ = file.Flush() // ignore error
- _ = file.Sync() // ignore error
+ if sb, ok := file.(*syncBuffer); ok && sb.file != nil {
+ _ = sb.Flush() // ignore error
+ needToSync.files[needToSync.num] = sb.file
+ needToSync.num++
}
}
if logging.loggerOptions.flush != nil {
logging.loggerOptions.flush()
}
+ return needToSync
+}
+
+type fileArray struct {
+ num int
+ files [severity.NumSeverity]*os.File
+}
+
+// syncAll attempts to "sync" their data to disk.
+func (l *loggingT) syncAll(needToSync fileArray) {
+ // Flush from fatal down, in case there's trouble flushing.
+ for i := 0; i < needToSync.num; i++ {
+ _ = needToSync.files[i].Sync() // ignore error
+ }
}
// CopyStandardLogTo arranges for messages written to the Go "log" package's
diff --git a/vendor/k8s.io/klog/v2/klogr_slog.go b/vendor/k8s.io/klog/v2/klogr_slog.go
index f7bf740306..c77d7baafa 100644
--- a/vendor/k8s.io/klog/v2/klogr_slog.go
+++ b/vendor/k8s.io/klog/v2/klogr_slog.go
@@ -25,7 +25,7 @@ import (
"strconv"
"time"
- "github.com/go-logr/logr/slogr"
+ "github.com/go-logr/logr"
"k8s.io/klog/v2/internal/buffer"
"k8s.io/klog/v2/internal/serialize"
@@ -35,7 +35,7 @@ import (
func (l *klogger) Handle(ctx context.Context, record slog.Record) error {
if logging.logger != nil {
- if slogSink, ok := logging.logger.GetSink().(slogr.SlogSink); ok {
+ if slogSink, ok := logging.logger.GetSink().(logr.SlogSink); ok {
// Let that logger do the work.
return slogSink.Handle(ctx, record)
}
@@ -77,13 +77,13 @@ func slogOutput(file string, line int, now time.Time, err error, s severity.Seve
buffer.PutBuffer(b)
}
-func (l *klogger) WithAttrs(attrs []slog.Attr) slogr.SlogSink {
+func (l *klogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
clone := *l
clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs))
return &clone
}
-func (l *klogger) WithGroup(name string) slogr.SlogSink {
+func (l *klogger) WithGroup(name string) logr.SlogSink {
clone := *l
if clone.groups != "" {
clone.groups += "." + name
@@ -93,4 +93,4 @@ func (l *klogger) WithGroup(name string) slogr.SlogSink {
return &clone
}
-var _ slogr.SlogSink = &klogger{}
+var _ logr.SlogSink = &klogger{}
diff --git a/vendor/k8s.io/klog/v2/safeptr.go b/vendor/k8s.io/klog/v2/safeptr.go
new file mode 100644
index 0000000000..bbe24c2e82
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/safeptr.go
@@ -0,0 +1,34 @@
+//go:build go1.18
+// +build go1.18
+
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package klog
+
+// SafePtr is a function that takes a pointer of any type (T) as an argument.
+// If the provided pointer is not nil, it returns the same pointer. If it is nil, it returns nil instead.
+//
+// This function is particularly useful to prevent nil pointer dereferencing when:
+//
+// - The type implements interfaces that are called by the logger, such as `fmt.Stringer`.
+// - And these interface implementations do not perform nil checks themselves.
+func SafePtr[T any](p *T) any {
+ if p == nil {
+ return nil
+ }
+ return p
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go
index 2e15e163c5..da2e8f11ad 100644
--- a/vendor/k8s.io/kube-openapi/pkg/common/common.go
+++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go
@@ -48,11 +48,11 @@ type GetOpenAPIDefinitions func(ReferenceCallback) map[string]OpenAPIDefinition
// GetOpenAPITypeFormat for more information about trade-offs of using this interface or GetOpenAPITypeFormat method when
// possible.
type OpenAPIDefinitionGetter interface {
- OpenAPIDefinition() *OpenAPIDefinition
+ OpenAPIDefinition() OpenAPIDefinition
}
type OpenAPIV3DefinitionGetter interface {
- OpenAPIV3Definition() *OpenAPIDefinition
+ OpenAPIV3Definition() OpenAPIDefinition
}
type PathHandler interface {
@@ -164,6 +164,9 @@ type OpenAPIV3Config struct {
// It is an optional function to customize model names.
GetDefinitionName func(name string) (string, spec.Extensions)
+ // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving.
+ PostProcessSpec func(*spec3.OpenAPI) (*spec3.OpenAPI, error)
+
// SecuritySchemes is list of all security schemes for OpenAPI service.
SecuritySchemes spec3.SecuritySchemes
diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
index fc45634887..10f0b385fa 100644
--- a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
+++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
@@ -29,10 +29,10 @@ import (
"sync"
"time"
- "github.com/golang/protobuf/proto"
openapi_v3 "github.com/google/gnostic-models/openapiv3"
"github.com/google/uuid"
"github.com/munnerz/goautoneg"
+ "google.golang.org/protobuf/proto"
"k8s.io/klog/v2"
"k8s.io/kube-openapi/pkg/cached"
diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go
index 61141a500d..81280aae64 100644
--- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go
+++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go
@@ -22,7 +22,7 @@ import (
"strings"
"k8s.io/kube-openapi/pkg/validation/spec"
- "sigs.k8s.io/structured-merge-diff/v4/schema"
+ "sigs.k8s.io/structured-merge-diff/v6/schema"
)
// ToSchemaFromOpenAPI converts a directory of OpenAPI schemas to an smd Schema.
diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go
index 2c6fd76a91..e40f6056e7 100644
--- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go
+++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go
@@ -22,7 +22,7 @@ import (
"strings"
"k8s.io/kube-openapi/pkg/util/proto"
- "sigs.k8s.io/structured-merge-diff/v4/schema"
+ "sigs.k8s.io/structured-merge-diff/v6/schema"
)
// ToSchema converts openapi definitions into a schema suitable for structured
diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go
index 799d866d51..c4a083cb41 100644
--- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go
+++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go
@@ -20,7 +20,7 @@ import (
"fmt"
"sort"
- "sigs.k8s.io/structured-merge-diff/v4/schema"
+ "sigs.k8s.io/structured-merge-diff/v6/schema"
)
const (
@@ -214,9 +214,6 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) {
}
}
- if union.Discriminator != nil && len(union.Fields) == 0 {
- return schema.Union{}, fmt.Errorf("discriminator set to %v, but no fields in union", *union.Discriminator)
- }
return union, nil
}
diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go
index 08b6246ceb..25e4fd09eb 100644
--- a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go
+++ b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go
@@ -4,7 +4,7 @@ import (
"math/rand"
"strings"
- fuzz "github.com/google/gofuzz"
+ "sigs.k8s.io/randfill"
"k8s.io/kube-openapi/pkg/validation/spec"
)
@@ -25,15 +25,15 @@ func randAlphanumString() string {
}
var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
- func(s *string, c fuzz.Continue) {
+ func(s *string, c randfill.Continue) {
// All OpenAPI V3 map keys must follow the corresponding
// regex. Note that this restricts the range for all other
// string values as well.
str := randAlphanumString()
*s = str
},
- func(o *OpenAPI, c fuzz.Continue) {
- c.FuzzNoCustom(o)
+ func(o *OpenAPI, c randfill.Continue) {
+ c.FillNoCustom(o)
o.Version = "3.0.0"
for i, val := range o.SecurityRequirement {
if val == nil {
@@ -48,45 +48,45 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
}
},
- func(r *interface{}, c fuzz.Continue) {
+ func(r *interface{}, c randfill.Continue) {
switch c.Intn(3) {
case 0:
*r = nil
case 1:
- n := c.RandString() + "x"
+ n := c.String(0) + "x"
*r = n
case 2:
n := c.Float64()
*r = n
}
},
- func(v **spec.Info, c fuzz.Continue) {
+ func(v **spec.Info, c randfill.Continue) {
// Info is never nil
*v = &spec.Info{}
- c.FuzzNoCustom(*v)
- (*v).Title = c.RandString() + "x"
+ c.FillNoCustom(*v)
+ (*v).Title = c.String(0) + "x"
},
- func(v *Paths, c fuzz.Continue) {
- c.Fuzz(&v.VendorExtensible)
+ func(v *Paths, c randfill.Continue) {
+ c.Fill(&v.VendorExtensible)
num := c.Intn(5)
if num > 0 {
v.Paths = make(map[string]*Path)
}
for i := 0; i < num; i++ {
val := Path{}
- c.Fuzz(&val)
- v.Paths["/"+c.RandString()] = &val
+ c.Fill(&val)
+ v.Paths["/"+c.String(0)] = &val
}
},
- func(v *SecurityScheme, c fuzz.Continue) {
+ func(v *SecurityScheme, c randfill.Continue) {
if c.Intn(refChance) == 0 {
- c.Fuzz(&v.Refable)
+ c.Fill(&v.Refable)
return
}
switch c.Intn(4) {
case 0:
v.Type = "apiKey"
- v.Name = c.RandString() + "x"
+ v.Name = c.String(0) + "x"
switch c.Intn(3) {
case 0:
v.In = "query"
@@ -101,17 +101,17 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
v.Type = "oauth2"
v.Flows = make(map[string]*OAuthFlow)
flow := OAuthFlow{}
- flow.AuthorizationUrl = c.RandString() + "x"
+ flow.AuthorizationUrl = c.String(0) + "x"
v.Flows["implicit"] = &flow
flow.Scopes = make(map[string]string)
flow.Scopes["foo"] = "bar"
case 3:
v.Type = "openIdConnect"
- v.OpenIdConnectUrl = "https://" + c.RandString()
+ v.OpenIdConnectUrl = "https://" + c.String(0)
}
v.Scheme = "basic"
},
- func(v *spec.Ref, c fuzz.Continue) {
+ func(v *spec.Ref, c randfill.Continue) {
switch c.Intn(7) {
case 0:
*v = spec.MustCreateRef("#/components/schemas/" + randAlphanumString())
@@ -127,13 +127,13 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
*v = spec.MustCreateRef("#/components/requestBodies/" + randAlphanumString())
}
},
- func(v *Parameter, c fuzz.Continue) {
+ func(v *Parameter, c randfill.Continue) {
if c.Intn(refChance) == 0 {
- c.Fuzz(&v.Refable)
+ c.Fill(&v.Refable)
return
}
- c.Fuzz(&v.ParameterProps)
- c.Fuzz(&v.VendorExtensible)
+ c.Fill(&v.ParameterProps)
+ c.Fill(&v.VendorExtensible)
switch c.Intn(3) {
case 0:
@@ -145,44 +145,44 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
v.In = "cookie"
}
},
- func(v *RequestBody, c fuzz.Continue) {
+ func(v *RequestBody, c randfill.Continue) {
if c.Intn(refChance) == 0 {
- c.Fuzz(&v.Refable)
+ c.Fill(&v.Refable)
return
}
- c.Fuzz(&v.RequestBodyProps)
- c.Fuzz(&v.VendorExtensible)
+ c.Fill(&v.RequestBodyProps)
+ c.Fill(&v.VendorExtensible)
},
- func(v *Header, c fuzz.Continue) {
+ func(v *Header, c randfill.Continue) {
if c.Intn(refChance) == 0 {
- c.Fuzz(&v.Refable)
+ c.Fill(&v.Refable)
return
}
- c.Fuzz(&v.HeaderProps)
- c.Fuzz(&v.VendorExtensible)
+ c.Fill(&v.HeaderProps)
+ c.Fill(&v.VendorExtensible)
},
- func(v *ResponsesProps, c fuzz.Continue) {
- c.Fuzz(&v.Default)
+ func(v *ResponsesProps, c randfill.Continue) {
+ c.Fill(&v.Default)
n := c.Intn(5)
for i := 0; i < n; i++ {
r2 := Response{}
- c.Fuzz(&r2)
+ c.Fill(&r2)
// HTTP Status code in 100-599 Range
code := c.Intn(500) + 100
v.StatusCodeResponses = make(map[int]*Response)
v.StatusCodeResponses[code] = &r2
}
},
- func(v *Response, c fuzz.Continue) {
+ func(v *Response, c randfill.Continue) {
if c.Intn(refChance) == 0 {
- c.Fuzz(&v.Refable)
+ c.Fill(&v.Refable)
return
}
- c.Fuzz(&v.ResponseProps)
- c.Fuzz(&v.VendorExtensible)
+ c.Fill(&v.ResponseProps)
+ c.Fill(&v.VendorExtensible)
},
- func(v *Operation, c fuzz.Continue) {
- c.FuzzNoCustom(v)
+ func(v *Operation, c randfill.Continue) {
+ c.FillNoCustom(v)
// Do not fuzz null values into the array.
for i, val := range v.SecurityRequirement {
if val == nil {
@@ -196,85 +196,85 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
}
}
},
- func(v *spec.Extensions, c fuzz.Continue) {
+ func(v *spec.Extensions, c randfill.Continue) {
numChildren := c.Intn(5)
for i := 0; i < numChildren; i++ {
if *v == nil {
*v = spec.Extensions{}
}
- (*v)["x-"+c.RandString()] = c.RandString()
+ (*v)["x-"+c.String(0)] = c.String(0)
}
},
- func(v *spec.ExternalDocumentation, c fuzz.Continue) {
- c.Fuzz(&v.Description)
+ func(v *spec.ExternalDocumentation, c randfill.Continue) {
+ c.Fill(&v.Description)
v.URL = "https://" + randAlphanumString()
},
- func(v *spec.SchemaURL, c fuzz.Continue) {
+ func(v *spec.SchemaURL, c randfill.Continue) {
*v = spec.SchemaURL("https://" + randAlphanumString())
},
- func(v *spec.SchemaOrBool, c fuzz.Continue) {
+ func(v *spec.SchemaOrBool, c randfill.Continue) {
*v = spec.SchemaOrBool{}
- if c.RandBool() {
- v.Allows = c.RandBool()
+ if c.Bool() {
+ v.Allows = c.Bool()
} else {
v.Schema = &spec.Schema{}
v.Allows = true
- c.Fuzz(&v.Schema)
+ c.Fill(&v.Schema)
}
},
- func(v *spec.SchemaOrArray, c fuzz.Continue) {
+ func(v *spec.SchemaOrArray, c randfill.Continue) {
*v = spec.SchemaOrArray{}
- if c.RandBool() {
+ if c.Bool() {
schema := spec.Schema{}
- c.Fuzz(&schema)
+ c.Fill(&schema)
v.Schema = &schema
} else {
v.Schemas = []spec.Schema{}
numChildren := c.Intn(5)
for i := 0; i < numChildren; i++ {
schema := spec.Schema{}
- c.Fuzz(&schema)
+ c.Fill(&schema)
v.Schemas = append(v.Schemas, schema)
}
}
},
- func(v *spec.SchemaOrStringArray, c fuzz.Continue) {
- if c.RandBool() {
+ func(v *spec.SchemaOrStringArray, c randfill.Continue) {
+ if c.Bool() {
*v = spec.SchemaOrStringArray{}
- if c.RandBool() {
- c.Fuzz(&v.Property)
+ if c.Bool() {
+ c.Fill(&v.Property)
} else {
- c.Fuzz(&v.Schema)
+ c.Fill(&v.Schema)
}
}
},
- func(v *spec.Schema, c fuzz.Continue) {
+ func(v *spec.Schema, c randfill.Continue) {
if c.Intn(refChance) == 0 {
- c.Fuzz(&v.Ref)
+ c.Fill(&v.Ref)
return
}
- if c.RandBool() {
+ if c.Bool() {
// file schema
- c.Fuzz(&v.Default)
- c.Fuzz(&v.Description)
- c.Fuzz(&v.Example)
- c.Fuzz(&v.ExternalDocs)
+ c.Fill(&v.Default)
+ c.Fill(&v.Description)
+ c.Fill(&v.Example)
+ c.Fill(&v.ExternalDocs)
- c.Fuzz(&v.Format)
- c.Fuzz(&v.ReadOnly)
- c.Fuzz(&v.Required)
- c.Fuzz(&v.Title)
+ c.Fill(&v.Format)
+ c.Fill(&v.ReadOnly)
+ c.Fill(&v.Required)
+ c.Fill(&v.Title)
v.Type = spec.StringOrArray{"file"}
} else {
// normal schema
- c.Fuzz(&v.SchemaProps)
- c.Fuzz(&v.SwaggerSchemaProps)
- c.Fuzz(&v.VendorExtensible)
- c.Fuzz(&v.ExtraProps)
+ c.Fill(&v.SchemaProps)
+ c.Fill(&v.SwaggerSchemaProps)
+ c.Fill(&v.VendorExtensible)
+ c.Fill(&v.ExtraProps)
}
},
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
index 5789e67ab7..c7b69b2005 100644
--- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
+++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
@@ -22,7 +22,7 @@ import (
"strings"
openapi_v2 "github.com/google/gnostic-models/openapiv2"
- "gopkg.in/yaml.v2"
+ yaml "go.yaml.in/yaml/v2"
)
func newSchemaError(path *Path, format string, a ...interface{}) error {
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go
index d9f2896e35..8694c6c769 100644
--- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go
+++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go
@@ -22,7 +22,7 @@ import (
"strings"
openapi_v3 "github.com/google/gnostic-models/openapiv3"
- "gopkg.in/yaml.v3"
+ "go.yaml.in/yaml/v3"
)
// Temporary parse implementation to be used until gnostic->kube-openapi conversion
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/trie.go b/vendor/k8s.io/kube-openapi/pkg/util/trie.go
new file mode 100644
index 0000000000..a9a76c1791
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/util/trie.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+// A simple trie implementation with Add and HasPrefix methods only.
+type Trie struct {
+ children map[byte]*Trie
+ wordTail bool
+ word string
+}
+
+// NewTrie creates a Trie and add all strings in the provided list to it.
+func NewTrie(list []string) Trie {
+ ret := Trie{
+ children: make(map[byte]*Trie),
+ wordTail: false,
+ }
+ for _, v := range list {
+ ret.Add(v)
+ }
+ return ret
+}
+
+// Add adds a word to this trie
+func (t *Trie) Add(v string) {
+ root := t
+ for _, b := range []byte(v) {
+ child, exists := root.children[b]
+ if !exists {
+ child = &Trie{
+ children: make(map[byte]*Trie),
+ wordTail: false,
+ }
+ root.children[b] = child
+ }
+ root = child
+ }
+ root.wordTail = true
+ root.word = v
+}
+
+// HasPrefix returns true of v has any of the prefixes stored in this trie.
+func (t *Trie) HasPrefix(v string) bool {
+ _, has := t.GetPrefix(v)
+ return has
+}
+
+// GetPrefix is like HasPrefix but return the prefix in case of match or empty string otherwise.
+func (t *Trie) GetPrefix(v string) (string, bool) {
+ root := t
+ if root.wordTail {
+ return root.word, true
+ }
+ for _, b := range []byte(v) {
+ child, exists := root.children[b]
+ if !exists {
+ return "", false
+ }
+ if child.wordTail {
+ return child.word, true
+ }
+ root = child
+ }
+ return "", false
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/util.go b/vendor/k8s.io/kube-openapi/pkg/util/util.go
new file mode 100644
index 0000000000..830ec3ca09
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/util/util.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "reflect"
+ "strings"
+)
+
+// [DEPRECATED] ToCanonicalName converts Golang package/type canonical name into REST friendly OpenAPI name.
+// This method is deprecated because it has a misleading name. Please use ToRESTFriendlyName
+// instead
+//
+// NOTE: actually the "canonical name" in this method should be named "REST friendly OpenAPI name",
+// which is different from "canonical name" defined in GetCanonicalTypeName. The "canonical name" defined
+// in GetCanonicalTypeName means Go type names with full package path.
+//
+// Examples of REST friendly OpenAPI name:
+//
+// Input: k8s.io/api/core/v1.Pod
+// Output: io.k8s.api.core.v1.Pod
+//
+// Input: k8s.io/api/core/v1
+// Output: io.k8s.api.core.v1
+//
+// Input: csi.storage.k8s.io/v1alpha1.CSINodeInfo
+// Output: io.k8s.storage.csi.v1alpha1.CSINodeInfo
+func ToCanonicalName(name string) string {
+ return ToRESTFriendlyName(name)
+}
+
+// ToRESTFriendlyName converts Golang package/type canonical name into REST friendly OpenAPI name.
+//
+// Examples of REST friendly OpenAPI name:
+//
+// Input: k8s.io/api/core/v1.Pod
+// Output: io.k8s.api.core.v1.Pod
+//
+// Input: k8s.io/api/core/v1
+// Output: io.k8s.api.core.v1
+//
+// Input: csi.storage.k8s.io/v1alpha1.CSINodeInfo
+// Output: io.k8s.storage.csi.v1alpha1.CSINodeInfo
+func ToRESTFriendlyName(name string) string {
+ nameParts := strings.Split(name, "/")
+ // Reverse first part. e.g., io.k8s... instead of k8s.io...
+ if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") {
+ parts := strings.Split(nameParts[0], ".")
+ for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
+ parts[i], parts[j] = parts[j], parts[i]
+ }
+ nameParts[0] = strings.Join(parts, ".")
+ }
+ return strings.Join(nameParts, ".")
+}
+
+// OpenAPICanonicalTypeNamer is an interface for models without Go type to seed model name.
+//
+// OpenAPI canonical names are Go type names with full package path, for uniquely indentifying
+// a model / Go type. If a Go type is vendored from another package, only the path after "/vendor/"
+// should be used. For custom resource definition (CRD), the canonical name is expected to be
+//
+// group/version.kind
+//
+// Examples of canonical name:
+//
+// Go type: k8s.io/kubernetes/pkg/apis/core.Pod
+// CRD: csi.storage.k8s.io/v1alpha1.CSINodeInfo
+//
+// Example for vendored Go type:
+//
+// Original full path: k8s.io/kubernetes/vendor/k8s.io/api/core/v1.Pod
+// Canonical name: k8s.io/api/core/v1.Pod
+//
+// Original full path: vendor/k8s.io/api/core/v1.Pod
+// Canonical name: k8s.io/api/core/v1.Pod
+type OpenAPICanonicalTypeNamer interface {
+ OpenAPICanonicalTypeName() string
+}
+
+// OpenAPIModelNamer is an interface Go types may implement to provide an OpenAPI model name.
+//
+// This takes precedence over OpenAPICanonicalTypeNamer, and should be used when a Go type has a model
+// name that differs from its canonical type name as determined by Go package name reflection.
+type OpenAPIModelNamer interface {
+ OpenAPIModelName() string
+}
+
+// GetCanonicalTypeName will find the canonical type name of a sample object, removing
+// the "vendor" part of the path
+func GetCanonicalTypeName(model interface{}) string {
+ switch namer := model.(type) {
+ case OpenAPIModelNamer:
+ return namer.OpenAPIModelName()
+ case OpenAPICanonicalTypeNamer:
+ return namer.OpenAPICanonicalTypeName()
+ }
+ t := reflect.TypeOf(model)
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ if t.PkgPath() == "" {
+ return t.Name()
+ }
+ path := t.PkgPath()
+ if strings.Contains(path, "/vendor/") {
+ path = path[strings.Index(path, "/vendor/")+len("/vendor/"):]
+ } else if strings.HasPrefix(path, "vendor/") {
+ path = strings.TrimPrefix(path, "vendor/")
+ }
+ return path + "." + t.Name()
+}
diff --git a/vendor/k8s.io/utils/clock/testing/fake_clock.go b/vendor/k8s.io/utils/clock/testing/fake_clock.go
index 79e11deb65..9503690be2 100644
--- a/vendor/k8s.io/utils/clock/testing/fake_clock.go
+++ b/vendor/k8s.io/utils/clock/testing/fake_clock.go
@@ -48,7 +48,6 @@ type fakeClockWaiter struct {
stepInterval time.Duration
skipIfBlocked bool
destChan chan time.Time
- fired bool
afterFunc func()
}
@@ -198,12 +197,10 @@ func (f *FakeClock) setTimeLocked(t time.Time) {
if w.skipIfBlocked {
select {
case w.destChan <- t:
- w.fired = true
default:
}
} else {
w.destChan <- t
- w.fired = true
}
if w.afterFunc != nil {
@@ -224,14 +221,26 @@ func (f *FakeClock) setTimeLocked(t time.Time) {
f.waiters = newWaiters
}
-// HasWaiters returns true if After or AfterFunc has been called on f but not yet satisfied (so you can
-// write race-free tests).
+// HasWaiters returns true if Waiters() returns non-0 (so you can write race-free tests).
func (f *FakeClock) HasWaiters() bool {
f.lock.RLock()
defer f.lock.RUnlock()
return len(f.waiters) > 0
}
+// Waiters returns the number of "waiters" on the clock (so you can write race-free
+// tests). A waiter exists for:
+// - every call to After that has not yet signaled its channel.
+// - every call to AfterFunc that has not yet called its callback.
+// - every timer created with NewTimer which is currently ticking.
+// - every ticker created with NewTicker which is currently ticking.
+// - every ticker created with Tick.
+func (f *FakeClock) Waiters() int {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ return len(f.waiters)
+}
+
// Sleep is akin to time.Sleep
func (f *FakeClock) Sleep(d time.Duration) {
f.Step(d)
@@ -305,44 +314,48 @@ func (f *fakeTimer) C() <-chan time.Time {
return f.waiter.destChan
}
-// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise.
+// Stop prevents the Timer from firing. It returns true if the call stops the
+// timer, false if the timer has already expired or been stopped.
func (f *fakeTimer) Stop() bool {
f.fakeClock.lock.Lock()
defer f.fakeClock.lock.Unlock()
+ active := false
newWaiters := make([]*fakeClockWaiter, 0, len(f.fakeClock.waiters))
for i := range f.fakeClock.waiters {
w := f.fakeClock.waiters[i]
if w != &f.waiter {
newWaiters = append(newWaiters, w)
+ continue
}
+ // If timer is found, it has not been fired yet.
+ active = true
}
f.fakeClock.waiters = newWaiters
- return !f.waiter.fired
+ return active
}
-// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet
-// fired, or false otherwise.
+// Reset changes the timer to expire after duration d. It returns true if the
+// timer had been active, false if the timer had expired or been stopped.
func (f *fakeTimer) Reset(d time.Duration) bool {
f.fakeClock.lock.Lock()
defer f.fakeClock.lock.Unlock()
- active := !f.waiter.fired
+ active := false
- f.waiter.fired = false
f.waiter.targetTime = f.fakeClock.time.Add(d)
- var isWaiting bool
for i := range f.fakeClock.waiters {
w := f.fakeClock.waiters[i]
if w == &f.waiter {
- isWaiting = true
+ // If timer is found, it has not been fired yet.
+ active = true
break
}
}
- if !isWaiting {
+ if !active {
f.fakeClock.waiters = append(f.fakeClock.waiters, &f.waiter)
}
diff --git a/vendor/k8s.io/utils/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go
index e4e740cad4..f64d64955b 100644
--- a/vendor/k8s.io/utils/integer/integer.go
+++ b/vendor/k8s.io/utils/integer/integer.go
@@ -16,7 +16,10 @@ limitations under the License.
package integer
-// IntMax returns the maximum of the params
+import "math"
+
+// IntMax returns the maximum of the params.
+// Deprecated: for new code, use the max() builtin instead.
func IntMax(a, b int) int {
if b > a {
return b
@@ -24,7 +27,8 @@ func IntMax(a, b int) int {
return a
}
-// IntMin returns the minimum of the params
+// IntMin returns the minimum of the params.
+// Deprecated: for new code, use the min() builtin instead.
func IntMin(a, b int) int {
if b < a {
return b
@@ -32,7 +36,8 @@ func IntMin(a, b int) int {
return a
}
-// Int32Max returns the maximum of the params
+// Int32Max returns the maximum of the params.
+// Deprecated: for new code, use the max() builtin instead.
func Int32Max(a, b int32) int32 {
if b > a {
return b
@@ -40,7 +45,8 @@ func Int32Max(a, b int32) int32 {
return a
}
-// Int32Min returns the minimum of the params
+// Int32Min returns the minimum of the params.
+// Deprecated: for new code, use the min() builtin instead.
func Int32Min(a, b int32) int32 {
if b < a {
return b
@@ -48,7 +54,8 @@ func Int32Min(a, b int32) int32 {
return a
}
-// Int64Max returns the maximum of the params
+// Int64Max returns the maximum of the params.
+// Deprecated: for new code, use the max() builtin instead.
func Int64Max(a, b int64) int64 {
if b > a {
return b
@@ -56,7 +63,8 @@ func Int64Max(a, b int64) int64 {
return a
}
-// Int64Min returns the minimum of the params
+// Int64Min returns the minimum of the params.
+// Deprecated: for new code, use the min() builtin instead.
func Int64Min(a, b int64) int64 {
if b < a {
return b
@@ -65,9 +73,7 @@ func Int64Min(a, b int64) int64 {
}
// RoundToInt32 rounds floats into integer numbers.
+// Deprecated: use math.Round() and a cast directly.
func RoundToInt32(a float64) int32 {
- if a < 0 {
- return int32(a - 0.5)
- }
- return int32(a + 0.5)
+ return int32(math.Round(a))
}
diff --git a/vendor/k8s.io/utils/net/multi_listen.go b/vendor/k8s.io/utils/net/multi_listen.go
new file mode 100644
index 0000000000..e5d508055d
--- /dev/null
+++ b/vendor/k8s.io/utils/net/multi_listen.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "sync"
+ "sync/atomic"
+)
+
+// connErrPair pairs conn and error which is returned by accept on sub-listeners.
+type connErrPair struct {
+ conn net.Conn
+ err error
+}
+
+// multiListener implements net.Listener
+type multiListener struct {
+ listeners []net.Listener
+ wg sync.WaitGroup
+
+ // connCh passes accepted connections, from child listeners to parent.
+ connCh chan connErrPair
+ // stopCh communicates from parent to child listeners.
+ stopCh chan struct{}
+ closed atomic.Bool
+}
+
+// compile time check to ensure *multiListener implements net.Listener
+var _ net.Listener = &multiListener{}
+
+// MultiListen returns net.Listener which can listen on and accept connections for
+// the given network on multiple addresses. Internally it uses stdlib to create
+// sub-listener and multiplexes connection requests using go-routines.
+// The network must be "tcp", "tcp4" or "tcp6".
+// It follows the semantics of net.Listen that primarily means:
+// 1. If the host is an unspecified/zero IP address with "tcp" network, MultiListen
+// listens on all available unicast and anycast IP addresses of the local system.
+// 2. Use "tcp4" or "tcp6" to exclusively listen on IPv4 or IPv6 family, respectively.
+// 3. The host can accept names (e.g, localhost) and it will create a listener for at
+// most one of the host's IP.
+func MultiListen(ctx context.Context, network string, addrs ...string) (net.Listener, error) {
+ var lc net.ListenConfig
+ return multiListen(
+ ctx,
+ network,
+ addrs,
+ func(ctx context.Context, network, address string) (net.Listener, error) {
+ return lc.Listen(ctx, network, address)
+ })
+}
+
+// multiListen implements MultiListen by consuming stdlib functions as dependency allowing
+// mocking for unit-testing.
+func multiListen(
+ ctx context.Context,
+ network string,
+ addrs []string,
+ listenFunc func(ctx context.Context, network, address string) (net.Listener, error),
+) (net.Listener, error) {
+ if !(network == "tcp" || network == "tcp4" || network == "tcp6") {
+ return nil, fmt.Errorf("network %q not supported", network)
+ }
+ if len(addrs) == 0 {
+ return nil, fmt.Errorf("no address provided to listen on")
+ }
+
+ ml := &multiListener{
+ connCh: make(chan connErrPair),
+ stopCh: make(chan struct{}),
+ }
+ for _, addr := range addrs {
+ l, err := listenFunc(ctx, network, addr)
+ if err != nil {
+ // close all the sub-listeners and exit
+ _ = ml.Close()
+ return nil, err
+ }
+ ml.listeners = append(ml.listeners, l)
+ }
+
+ for _, l := range ml.listeners {
+ ml.wg.Add(1)
+ go func(l net.Listener) {
+ defer ml.wg.Done()
+ for {
+ // Accept() is blocking, unless ml.Close() is called, in which
+ // case it will return immediately with an error.
+ conn, err := l.Accept()
+ // This assumes that ANY error from Accept() will terminate the
+ // sub-listener. We could maybe be more precise, but it
+ // doesn't seem necessary.
+ terminate := err != nil
+
+ select {
+ case ml.connCh <- connErrPair{conn: conn, err: err}:
+ case <-ml.stopCh:
+ // In case we accepted a connection AND were stopped, and
+ // this select-case was chosen, just throw away the
+ // connection. This avoids potentially blocking on connCh
+ // or leaking a connection.
+ if conn != nil {
+ _ = conn.Close()
+ }
+ terminate = true
+ }
+ // Make sure we don't loop on Accept() returning an error and
+ // the select choosing the channel case.
+ if terminate {
+ return
+ }
+ }
+ }(l)
+ }
+ return ml, nil
+}
+
+// Accept implements net.Listener. It waits for and returns a connection from
+// any of the sub-listener.
+func (ml *multiListener) Accept() (net.Conn, error) {
+ // wait for any sub-listener to enqueue an accepted connection
+ connErr, ok := <-ml.connCh
+ if !ok {
+ // The channel will be closed only when Close() is called on the
+ // multiListener. Closing of this channel implies that all
+ // sub-listeners are also closed, which causes a "use of closed
+ // network connection" error on their Accept() calls. We return the
+ // same error for multiListener.Accept() if multiListener.Close()
+ // has already been called.
+ return nil, fmt.Errorf("use of closed network connection")
+ }
+ return connErr.conn, connErr.err
+}
+
+// Close implements net.Listener. It will close all sub-listeners and wait for
+// the go-routines to exit.
+func (ml *multiListener) Close() error {
+ // Make sure this can be called repeatedly without explosions.
+ if !ml.closed.CompareAndSwap(false, true) {
+ return fmt.Errorf("use of closed network connection")
+ }
+
+ // Tell all sub-listeners to stop.
+ close(ml.stopCh)
+
+ // Closing the listeners causes Accept() to immediately return an error in
+ // the sub-listener go-routines.
+ for _, l := range ml.listeners {
+ _ = l.Close()
+ }
+
+ // Wait for all the sub-listener go-routines to exit.
+ ml.wg.Wait()
+ close(ml.connCh)
+
+ // Drain any already-queued connections.
+ for connErr := range ml.connCh {
+ if connErr.conn != nil {
+ _ = connErr.conn.Close()
+ }
+ }
+ return nil
+}
+
+// Addr is an implementation of the net.Listener interface. It always returns
+// the address of the first listener. Callers should use conn.LocalAddr() to
+// obtain the actual local address of the sub-listener.
+func (ml *multiListener) Addr() net.Addr {
+ return ml.listeners[0].Addr()
+}
+
+// Addrs is like Addr, but returns the address for all registered listeners.
+func (ml *multiListener) Addrs() []net.Addr {
+ var ret []net.Addr
+ for _, l := range ml.listeners {
+ ret = append(ret, l.Addr())
+ }
+ return ret
+}
diff --git a/vendor/k8s.io/utils/strings/slices/slices.go b/vendor/k8s.io/utils/strings/slices/slices.go
deleted file mode 100644
index 8e21838f24..0000000000
--- a/vendor/k8s.io/utils/strings/slices/slices.go
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package slices defines various functions useful with slices of string type.
-// The goal is to be as close as possible to
-// https://github.com/golang/go/issues/45955. Ideal would be if we can just
-// replace "stringslices" if the "slices" package becomes standard.
-package slices
-
-// Equal reports whether two slices are equal: the same length and all
-// elements equal. If the lengths are different, Equal returns false.
-// Otherwise, the elements are compared in index order, and the
-// comparison stops at the first unequal pair.
-func Equal(s1, s2 []string) bool {
- if len(s1) != len(s2) {
- return false
- }
- for i, n := range s1 {
- if n != s2[i] {
- return false
- }
- }
- return true
-}
-
-// Filter appends to d each element e of s for which keep(e) returns true.
-// It returns the modified d. d may be s[:0], in which case the kept
-// elements will be stored in the same slice.
-// if the slices overlap in some other way, the results are unspecified.
-// To create a new slice with the filtered results, pass nil for d.
-func Filter(d, s []string, keep func(string) bool) []string {
- for _, n := range s {
- if keep(n) {
- d = append(d, n)
- }
- }
- return d
-}
-
-// Contains reports whether v is present in s.
-func Contains(s []string, v string) bool {
- return Index(s, v) >= 0
-}
-
-// Index returns the index of the first occurrence of v in s, or -1 if
-// not present.
-func Index(s []string, v string) int {
- // "Contains" may be replaced with "Index(s, v) >= 0":
- // https://github.com/golang/go/issues/45955#issuecomment-873377947
- for i, n := range s {
- if n == v {
- return i
- }
- }
- return -1
-}
-
-// Functions below are not in https://github.com/golang/go/issues/45955
-
-// Clone returns a new clone of s.
-func Clone(s []string) []string {
- // https://github.com/go101/go101/wiki/There-is-not-a-perfect-way-to-clone-slices-in-Go
- if s == nil {
- return nil
- }
- c := make([]string, len(s))
- copy(c, s)
- return c
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ed4496dfd0..a0c8fb7fcc 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -12,6 +12,9 @@ github.com/Azure/go-ansiterm/winterm
# github.com/MakeNowJust/heredoc v1.0.0
## explicit; go 1.12
github.com/MakeNowJust/heredoc
+# github.com/Masterminds/semver/v3 v3.4.0
+## explicit; go 1.21
+github.com/Masterminds/semver/v3
# github.com/Microsoft/go-winio v0.6.2
## explicit; go 1.21
github.com/Microsoft/go-winio
@@ -223,6 +226,9 @@ github.com/felixge/httpsnoop
# github.com/fujiwara/shapeio v1.0.0
## explicit; go 1.16
github.com/fujiwara/shapeio
+# github.com/fxamacker/cbor/v2 v2.9.0
+## explicit; go 1.20
+github.com/fxamacker/cbor/v2
# github.com/gertd/go-pluralize v0.2.0
## explicit; go 1.17
github.com/gertd/go-pluralize
@@ -236,26 +242,25 @@ github.com/go-errors/errors
## explicit; go 1.18
github.com/go-logr/logr
github.com/go-logr/logr/funcr
-github.com/go-logr/logr/slogr
# github.com/go-logr/stdr v1.2.2
## explicit; go 1.16
github.com/go-logr/stdr
-# github.com/go-openapi/jsonpointer v0.19.6
-## explicit; go 1.13
+# github.com/go-openapi/jsonpointer v0.21.0
+## explicit; go 1.20
github.com/go-openapi/jsonpointer
# github.com/go-openapi/jsonreference v0.20.2
## explicit; go 1.13
github.com/go-openapi/jsonreference
github.com/go-openapi/jsonreference/internal
-# github.com/go-openapi/swag v0.22.3
-## explicit; go 1.18
+# github.com/go-openapi/swag v0.23.0
+## explicit; go 1.20
github.com/go-openapi/swag
# github.com/go-resty/resty/v2 v2.7.0
## explicit; go 1.11
github.com/go-resty/resty/v2
-# github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572
-## explicit; go 1.13
-github.com/go-task/slim-sprig
+# github.com/go-task/slim-sprig/v3 v3.0.0
+## explicit; go 1.20
+github.com/go-task/slim-sprig/v3
# github.com/gofrs/flock v0.8.1
## explicit
github.com/gofrs/flock
@@ -272,15 +277,13 @@ github.com/gogo/protobuf/types
# github.com/golang/protobuf v1.5.4
## explicit; go 1.17
github.com/golang/protobuf/proto
-github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
-github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/timestamp
# github.com/google/btree v1.0.1
## explicit; go 1.12
github.com/google/btree
-# github.com/google/gnostic-models v0.6.8
-## explicit; go 1.18
+# github.com/google/gnostic-models v0.7.0
+## explicit; go 1.22
github.com/google/gnostic-models/compiler
github.com/google/gnostic-models/extensions
github.com/google/gnostic-models/jsonschema
@@ -325,12 +328,8 @@ github.com/google/go-github/v30/github
# github.com/google/go-querystring v1.0.0
## explicit
github.com/google/go-querystring/query
-# github.com/google/gofuzz v1.2.0
-## explicit; go 1.12
-github.com/google/gofuzz
-github.com/google/gofuzz/bytesource
-# github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1
-## explicit; go 1.14
+# github.com/google/pprof v0.0.0-20250403155104-27863c87afa6
+## explicit; go 1.23
github.com/google/pprof/profile
# github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
## explicit; go 1.13
@@ -520,7 +519,7 @@ github.com/moby/go-archive/tarheader
## explicit; go 1.19
github.com/moby/patternmatcher
github.com/moby/patternmatcher/ignorefile
-# github.com/moby/spdystream v0.2.0
+# github.com/moby/spdystream v0.5.0
## explicit; go 1.13
github.com/moby/spdystream
github.com/moby/spdystream/spdy
@@ -555,7 +554,7 @@ github.com/moby/term/windows
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
## explicit
github.com/modern-go/concurrent
-# github.com/modern-go/reflect2 v1.0.2
+# github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee
## explicit; go 1.12
github.com/modern-go/reflect2
# github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00
@@ -573,12 +572,13 @@ github.com/mxk/go-flowrate/flowrate
# github.com/olekukonko/tablewriter v0.0.5
## explicit; go 1.12
github.com/olekukonko/tablewriter
-# github.com/onsi/ginkgo/v2 v2.13.0
-## explicit; go 1.18
+# github.com/onsi/ginkgo/v2 v2.27.2
+## explicit; go 1.23.0
github.com/onsi/ginkgo/v2
github.com/onsi/ginkgo/v2/config
github.com/onsi/ginkgo/v2/formatter
github.com/onsi/ginkgo/v2/ginkgo
+github.com/onsi/ginkgo/v2/ginkgo/automaxprocs
github.com/onsi/ginkgo/v2/ginkgo/build
github.com/onsi/ginkgo/v2/ginkgo/command
github.com/onsi/ginkgo/v2/ginkgo/generators
@@ -592,16 +592,18 @@ github.com/onsi/ginkgo/v2/internal
github.com/onsi/ginkgo/v2/internal/global
github.com/onsi/ginkgo/v2/internal/interrupt_handler
github.com/onsi/ginkgo/v2/internal/parallel_support
+github.com/onsi/ginkgo/v2/internal/reporters
github.com/onsi/ginkgo/v2/internal/testingtproxy
github.com/onsi/ginkgo/v2/reporters
github.com/onsi/ginkgo/v2/types
-# github.com/onsi/gomega v1.29.0
-## explicit; go 1.18
+# github.com/onsi/gomega v1.38.2
+## explicit; go 1.23.0
github.com/onsi/gomega
github.com/onsi/gomega/format
github.com/onsi/gomega/internal
github.com/onsi/gomega/internal/gutil
github.com/onsi/gomega/matchers
+github.com/onsi/gomega/matchers/internal/miter
github.com/onsi/gomega/matchers/support/goraph/bipartitegraph
github.com/onsi/gomega/matchers/support/goraph/edge
github.com/onsi/gomega/matchers/support/goraph/node
@@ -703,6 +705,9 @@ github.com/vbatts/tar-split/archive/tar
# github.com/vmware-labs/yaml-jsonpath v0.3.2
## explicit; go 1.13
github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath
+# github.com/x448/float16 v0.8.4
+## explicit; go 1.11
+github.com/x448/float16
# github.com/xanzy/ssh-agent v0.2.1
## explicit
github.com/xanzy/ssh-agent
@@ -783,8 +788,14 @@ go.starlark.net/resolve
go.starlark.net/starlark
go.starlark.net/starlarkstruct
go.starlark.net/syntax
-# golang.org/x/crypto v0.40.0
-## explicit; go 1.23.0
+# go.yaml.in/yaml/v2 v2.4.3
+## explicit; go 1.15
+go.yaml.in/yaml/v2
+# go.yaml.in/yaml/v3 v3.0.4
+## explicit; go 1.16
+go.yaml.in/yaml/v3
+# golang.org/x/crypto v0.44.0
+## explicit; go 1.24.0
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5
golang.org/x/crypto/chacha20
@@ -802,8 +813,11 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
-# golang.org/x/net v0.42.0
-## explicit; go 1.23.0
+# golang.org/x/mod v0.29.0
+## explicit; go 1.24.0
+golang.org/x/mod/semver
+# golang.org/x/net v0.47.0
+## explicit; go 1.24.0
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
golang.org/x/net/html
@@ -823,21 +837,21 @@ golang.org/x/net/trace
## explicit; go 1.23.0
golang.org/x/oauth2
golang.org/x/oauth2/internal
-# golang.org/x/sync v0.16.0
-## explicit; go 1.23.0
+# golang.org/x/sync v0.18.0
+## explicit; go 1.24.0
golang.org/x/sync/errgroup
-# golang.org/x/sys v0.34.0
-## explicit; go 1.23.0
+# golang.org/x/sys v0.38.0
+## explicit; go 1.24.0
golang.org/x/sys/cpu
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
-# golang.org/x/term v0.33.0
-## explicit; go 1.23.0
+# golang.org/x/term v0.37.0
+## explicit; go 1.24.0
golang.org/x/term
-# golang.org/x/text v0.27.0
-## explicit; go 1.23.0
+# golang.org/x/text v0.31.0
+## explicit; go 1.24.0
golang.org/x/text/encoding
golang.org/x/text/encoding/charmap
golang.org/x/text/encoding/htmlindex
@@ -859,13 +873,31 @@ golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/width
-# golang.org/x/time v0.3.0
-## explicit
+# golang.org/x/time v0.9.0
+## explicit; go 1.18
golang.org/x/time/rate
-# golang.org/x/tools v0.34.0
-## explicit; go 1.23.0
+# golang.org/x/tools v0.38.0
+## explicit; go 1.24.0
+golang.org/x/tools/cover
golang.org/x/tools/go/ast/edge
golang.org/x/tools/go/ast/inspector
+golang.org/x/tools/go/gcexportdata
+golang.org/x/tools/go/packages
+golang.org/x/tools/go/types/objectpath
+golang.org/x/tools/go/types/typeutil
+golang.org/x/tools/internal/aliases
+golang.org/x/tools/internal/event
+golang.org/x/tools/internal/event/core
+golang.org/x/tools/internal/event/keys
+golang.org/x/tools/internal/event/label
+golang.org/x/tools/internal/gcimporter
+golang.org/x/tools/internal/gocommand
+golang.org/x/tools/internal/packagesinternal
+golang.org/x/tools/internal/pkgbits
+golang.org/x/tools/internal/stdlib
+golang.org/x/tools/internal/typeparams
+golang.org/x/tools/internal/typesinternal
+golang.org/x/tools/internal/versions
# google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a
## explicit; go 1.23.0
google.golang.org/genproto/googleapis/api/httpbody
@@ -940,8 +972,8 @@ google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.36.6
-## explicit; go 1.22
+# google.golang.org/protobuf v1.36.8
+## explicit; go 1.23
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire
@@ -1111,12 +1143,15 @@ k8s.io/api/scheduling/v1beta1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
-# k8s.io/apimachinery v0.29.0
-## explicit; go 1.21
+# k8s.io/apimachinery v0.35.0
+## explicit; go 1.25.0
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
+k8s.io/apimachinery/pkg/api/operation
k8s.io/apimachinery/pkg/api/resource
+k8s.io/apimachinery/pkg/api/validate/constraints
+k8s.io/apimachinery/pkg/api/validate/content
k8s.io/apimachinery/pkg/api/validation
k8s.io/apimachinery/pkg/apis/meta/v1
k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
@@ -1130,6 +1165,8 @@ k8s.io/apimachinery/pkg/labels
k8s.io/apimachinery/pkg/runtime
k8s.io/apimachinery/pkg/runtime/schema
k8s.io/apimachinery/pkg/runtime/serializer
+k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct
+k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes
k8s.io/apimachinery/pkg/runtime/serializer/json
k8s.io/apimachinery/pkg/runtime/serializer/protobuf
k8s.io/apimachinery/pkg/runtime/serializer/recognizer
@@ -1383,8 +1420,8 @@ k8s.io/component-base/version
# k8s.io/klog v1.0.0
## explicit; go 1.12
k8s.io/klog
-# k8s.io/klog/v2 v2.110.1
-## explicit; go 1.13
+# k8s.io/klog/v2 v2.130.1
+## explicit; go 1.18
k8s.io/klog/v2
k8s.io/klog/v2/internal/buffer
k8s.io/klog/v2/internal/clock
@@ -1392,8 +1429,8 @@ k8s.io/klog/v2/internal/dbg
k8s.io/klog/v2/internal/serialize
k8s.io/klog/v2/internal/severity
k8s.io/klog/v2/internal/sloghandler
-# k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00
-## explicit; go 1.19
+# k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912
+## explicit; go 1.23.0
k8s.io/kube-openapi/pkg/cached
k8s.io/kube-openapi/pkg/common
k8s.io/kube-openapi/pkg/handler3
@@ -1401,6 +1438,7 @@ k8s.io/kube-openapi/pkg/internal
k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json
k8s.io/kube-openapi/pkg/schemaconv
k8s.io/kube-openapi/pkg/spec3
+k8s.io/kube-openapi/pkg/util
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/util/proto/validation
k8s.io/kube-openapi/pkg/validation/spec
@@ -1415,7 +1453,7 @@ k8s.io/kubectl/pkg/util/openapi
k8s.io/kubectl/pkg/util/templates
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/validation
-# k8s.io/utils v0.0.0-20230726121419-3b25d923346b
+# k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
## explicit; go 1.18
k8s.io/utils/clock
k8s.io/utils/clock/testing
@@ -1425,7 +1463,6 @@ k8s.io/utils/internal/third_party/forked/golang/net
k8s.io/utils/net
k8s.io/utils/pointer
k8s.io/utils/ptr
-k8s.io/utils/strings/slices
# mvdan.cc/sh/v3 v3.5.1
## explicit; go 1.17
mvdan.cc/sh/v3/expand
@@ -1433,8 +1470,8 @@ mvdan.cc/sh/v3/fileutil
mvdan.cc/sh/v3/interp
mvdan.cc/sh/v3/pattern
mvdan.cc/sh/v3/syntax
-# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
-## explicit; go 1.18
+# sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730
+## explicit; go 1.23
sigs.k8s.io/json
sigs.k8s.io/json/internal/golang/encoding/json
# sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3
@@ -1520,15 +1557,24 @@ sigs.k8s.io/kustomize/kyaml/yaml/merge2
sigs.k8s.io/kustomize/kyaml/yaml/merge3
sigs.k8s.io/kustomize/kyaml/yaml/schema
sigs.k8s.io/kustomize/kyaml/yaml/walk
+# sigs.k8s.io/randfill v1.0.0
+## explicit; go 1.18
+sigs.k8s.io/randfill
+sigs.k8s.io/randfill/bytesource
# sigs.k8s.io/structured-merge-diff/v4 v4.4.1
## explicit; go 1.13
sigs.k8s.io/structured-merge-diff/v4/fieldpath
-sigs.k8s.io/structured-merge-diff/v4/merge
sigs.k8s.io/structured-merge-diff/v4/schema
sigs.k8s.io/structured-merge-diff/v4/typed
sigs.k8s.io/structured-merge-diff/v4/value
-# sigs.k8s.io/yaml v1.4.0
-## explicit; go 1.12
+# sigs.k8s.io/structured-merge-diff/v6 v6.3.0
+## explicit; go 1.23
+sigs.k8s.io/structured-merge-diff/v6/fieldpath
+sigs.k8s.io/structured-merge-diff/v6/merge
+sigs.k8s.io/structured-merge-diff/v6/schema
+sigs.k8s.io/structured-merge-diff/v6/typed
+sigs.k8s.io/structured-merge-diff/v6/value
+# sigs.k8s.io/yaml v1.6.0
+## explicit; go 1.22
sigs.k8s.io/yaml
-sigs.k8s.io/yaml/goyaml.v2
# github.com/distribution/reference => github.com/distribution/reference v0.5.0
diff --git a/vendor/sigs.k8s.io/json/Makefile b/vendor/sigs.k8s.io/json/Makefile
index 07b8bfa857..fb6cf040f5 100644
--- a/vendor/sigs.k8s.io/json/Makefile
+++ b/vendor/sigs.k8s.io/json/Makefile
@@ -19,7 +19,7 @@ vet:
go vet sigs.k8s.io/json
@echo "checking for external dependencies"
- @deps=$$(go mod graph); \
+ @deps=$$(go list -f '{{ if not (or .Standard .Module.Main) }}{{.ImportPath}}{{ end }}' -deps sigs.k8s.io/json/... || true); \
if [ -n "$${deps}" ]; then \
echo "only stdlib dependencies allowed, found:"; \
echo "$${deps}"; \
diff --git a/vendor/sigs.k8s.io/json/OWNERS b/vendor/sigs.k8s.io/json/OWNERS
index 0fadafbddb..a08a434e61 100644
--- a/vendor/sigs.k8s.io/json/OWNERS
+++ b/vendor/sigs.k8s.io/json/OWNERS
@@ -2,5 +2,5 @@
approvers:
- deads2k
- - lavalamp
+ - jpbetz
- liggitt
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
index 6a13cf2df0..3fe528bbf3 100644
--- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
@@ -21,10 +21,10 @@ import (
// Unmarshal parses the JSON-encoded data and stores the result
// in the value pointed to by v. If v is nil or not a pointer,
-// Unmarshal returns an InvalidUnmarshalError.
+// Unmarshal returns an [InvalidUnmarshalError].
//
// Unmarshal uses the inverse of the encodings that
-// Marshal uses, allocating maps, slices, and pointers as necessary,
+// [Marshal] uses, allocating maps, slices, and pointers as necessary,
// with the following additional rules:
//
// To unmarshal JSON into a pointer, Unmarshal first handles the case of
@@ -33,28 +33,28 @@ import (
// the value pointed at by the pointer. If the pointer is nil, Unmarshal
// allocates a new value for it to point to.
//
-// To unmarshal JSON into a value implementing the Unmarshaler interface,
-// Unmarshal calls that value's UnmarshalJSON method, including
+// To unmarshal JSON into a value implementing [Unmarshaler],
+// Unmarshal calls that value's [Unmarshaler.UnmarshalJSON] method, including
// when the input is a JSON null.
-// Otherwise, if the value implements encoding.TextUnmarshaler
-// and the input is a JSON quoted string, Unmarshal calls that value's
-// UnmarshalText method with the unquoted form of the string.
+// Otherwise, if the value implements [encoding.TextUnmarshaler]
+// and the input is a JSON quoted string, Unmarshal calls
+// [encoding.TextUnmarshaler.UnmarshalText] with the unquoted form of the string.
//
// To unmarshal JSON into a struct, Unmarshal matches incoming object
-// keys to the keys used by Marshal (either the struct field name or its tag),
+// keys to the keys used by [Marshal] (either the struct field name or its tag),
// preferring an exact match but also accepting a case-insensitive match. By
// default, object keys which don't have a corresponding struct field are
-// ignored (see Decoder.DisallowUnknownFields for an alternative).
+// ignored (see [Decoder.DisallowUnknownFields] for an alternative).
//
// To unmarshal JSON into an interface value,
// Unmarshal stores one of these in the interface value:
//
-// bool, for JSON booleans
-// float64, for JSON numbers
-// string, for JSON strings
-// []interface{}, for JSON arrays
-// map[string]interface{}, for JSON objects
-// nil for JSON null
+// - bool, for JSON booleans
+// - float64, for JSON numbers
+// - string, for JSON strings
+// - []any, for JSON arrays
+// - map[string]any, for JSON objects
+// - nil for JSON null
//
// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
// to zero and then appends each element to the slice.
@@ -72,16 +72,15 @@ import (
// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
// reuses the existing map, keeping existing entries. Unmarshal then stores
// key-value pairs from the JSON object into the map. The map's key type must
-// either be any string type, an integer, implement json.Unmarshaler, or
-// implement encoding.TextUnmarshaler.
+// either be any string type, an integer, or implement [encoding.TextUnmarshaler].
//
-// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError.
+// If the JSON-encoded data contain a syntax error, Unmarshal returns a [SyntaxError].
//
// If a JSON value is not appropriate for a given target type,
// or if a JSON number overflows the target type, Unmarshal
// skips that field and completes the unmarshaling as best it can.
// If no more serious errors are encountered, Unmarshal returns
-// an UnmarshalTypeError describing the earliest such error. In any
+// an [UnmarshalTypeError] describing the earliest such error. In any
// case, it's not guaranteed that all the remaining fields following
// the problematic one will be unmarshaled into the target object.
//
@@ -118,9 +117,6 @@ func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error {
// The input can be assumed to be a valid encoding of
// a JSON value. UnmarshalJSON must copy the JSON data
// if it wishes to retain the data after returning.
-//
-// By convention, to approximate the behavior of Unmarshal itself,
-// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op.
type Unmarshaler interface {
UnmarshalJSON([]byte) error
}
@@ -133,7 +129,7 @@ type UnmarshalTypeError struct {
Type reflect.Type // type of Go value it could not be assigned to
Offset int64 // error occurred after reading Offset bytes
Struct string // name of the struct type containing the field
- Field string // the full path from root node to the field
+ Field string // the full path from root node to the field, include embedded struct
}
func (e *UnmarshalTypeError) Error() string {
@@ -157,8 +153,8 @@ func (e *UnmarshalFieldError) Error() string {
return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
}
-// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
-// (The argument to Unmarshal must be a non-nil pointer.)
+// An InvalidUnmarshalError describes an invalid argument passed to [Unmarshal].
+// (The argument to [Unmarshal] must be a non-nil pointer.)
type InvalidUnmarshalError struct {
Type reflect.Type
}
@@ -282,7 +278,11 @@ func (d *decodeState) addErrorContext(err error) error {
switch err := err.(type) {
case *UnmarshalTypeError:
err.Struct = d.errorContext.Struct.Name()
- err.Field = strings.Join(d.errorContext.FieldStack, ".")
+ fieldStack := d.errorContext.FieldStack
+ if err.Field != "" {
+ fieldStack = append(fieldStack, err.Field)
+ }
+ err.Field = strings.Join(fieldStack, ".")
}
}
return err
@@ -493,9 +493,9 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm
}
// Prevent infinite loop if v is an interface pointing to its own address:
- // var v interface{}
+ // var v any
// v = &v
- if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v {
+ if v.Elem().Kind() == reflect.Interface && v.Elem().Elem().Equal(v) {
v = v.Elem()
break
}
@@ -573,17 +573,10 @@ func (d *decodeState) array(v reflect.Value) error {
break
}
- // Get element of array, growing if necessary.
+ // Expand slice length, growing the slice if necessary.
if v.Kind() == reflect.Slice {
- // Grow slice if necessary
if i >= v.Cap() {
- newcap := v.Cap() + v.Cap()/2
- if newcap < 4 {
- newcap = 4
- }
- newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
- reflect.Copy(newv, v)
- v.Set(newv)
+ v.Grow(1)
}
if i >= v.Len() {
v.SetLen(i + 1)
@@ -620,13 +613,11 @@ func (d *decodeState) array(v reflect.Value) error {
if i < v.Len() {
if v.Kind() == reflect.Array {
- // Array. Zero the rest.
- z := reflect.Zero(v.Type().Elem())
for ; i < v.Len(); i++ {
- v.Index(i).Set(z)
+ v.Index(i).SetZero() // zero remainder of array
}
} else {
- v.SetLen(i)
+ v.SetLen(i) // truncate the slice
}
}
if i == 0 && v.Kind() == reflect.Slice {
@@ -636,7 +627,7 @@ func (d *decodeState) array(v reflect.Value) error {
}
var nullLiteral = []byte("null")
-var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]()
// object consumes an object from d.data[d.off-1:], decoding into v.
// The first byte ('{') of the object has been read already.
@@ -776,7 +767,7 @@ func (d *decodeState) object(v reflect.Value) error {
if !mapElem.IsValid() {
mapElem = reflect.New(elemType).Elem()
} else {
- mapElem.Set(reflect.Zero(elemType))
+ mapElem.SetZero()
}
subv = mapElem
if checkDuplicateField != nil {
@@ -784,31 +775,20 @@ func (d *decodeState) object(v reflect.Value) error {
}
d.appendStrictFieldStackKey(string(key))
} else {
- var f *field
- if i, ok := fields.nameIndex[string(key)]; ok {
- // Found an exact name match.
- f = &fields.list[i]
- if checkDuplicateField != nil {
- checkDuplicateField(i, f.name)
- }
- } else if !d.caseSensitive {
- // Fall back to the expensive case-insensitive
- // linear search.
- for i := range fields.list {
- ff := &fields.list[i]
- if ff.equalFold(ff.nameBytes, key) {
- f = ff
- if checkDuplicateField != nil {
- checkDuplicateField(i, f.name)
- }
- break
- }
- }
+ f := fields.byExactName[string(key)]
+ if f == nil && !d.caseSensitive {
+ f = fields.byFoldedName[string(foldName(key))]
}
if f != nil {
+ if checkDuplicateField != nil {
+ checkDuplicateField(f.listIndex, f.name)
+ }
subv = v
destring = f.quoted
- for _, i := range f.index {
+ if d.errorContext == nil {
+ d.errorContext = new(errorContext)
+ }
+ for i, ind := range f.index {
if subv.Kind() == reflect.Pointer {
if subv.IsNil() {
// If a struct embeds a pointer to an unexported type,
@@ -828,13 +808,16 @@ func (d *decodeState) object(v reflect.Value) error {
}
subv = subv.Elem()
}
- subv = subv.Field(i)
- }
- if d.errorContext == nil {
- d.errorContext = new(errorContext)
+ if i < len(f.index)-1 {
+ d.errorContext.FieldStack = append(
+ d.errorContext.FieldStack,
+ subv.Type().Field(ind).Name,
+ )
+ }
+ subv = subv.Field(ind)
}
- d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name)
d.errorContext.Struct = t
+ d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name)
d.appendStrictFieldStackKey(f.name)
} else if d.disallowUnknownFields {
d.saveStrictError(d.newFieldError(unknownStrictErrType, string(key)))
@@ -874,33 +857,35 @@ func (d *decodeState) object(v reflect.Value) error {
if v.Kind() == reflect.Map {
kt := t.Key()
var kv reflect.Value
- switch {
- case reflect.PointerTo(kt).Implements(textUnmarshalerType):
+ if reflect.PointerTo(kt).Implements(textUnmarshalerType) {
kv = reflect.New(kt)
if err := d.literalStore(item, kv, true); err != nil {
return err
}
kv = kv.Elem()
- case kt.Kind() == reflect.String:
- kv = reflect.ValueOf(key).Convert(kt)
- default:
+ } else {
switch kt.Kind() {
+ case reflect.String:
+ kv = reflect.New(kt).Elem()
+ kv.SetString(string(key))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
s := string(key)
n, err := strconv.ParseInt(s, 10, 64)
- if err != nil || reflect.Zero(kt).OverflowInt(n) {
+ if err != nil || kt.OverflowInt(n) {
d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
break
}
- kv = reflect.ValueOf(n).Convert(kt)
+ kv = reflect.New(kt).Elem()
+ kv.SetInt(n)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
s := string(key)
n, err := strconv.ParseUint(s, 10, 64)
- if err != nil || reflect.Zero(kt).OverflowUint(n) {
+ if err != nil || kt.OverflowUint(n) {
d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
break
}
- kv = reflect.ValueOf(n).Convert(kt)
+ kv = reflect.New(kt).Elem()
+ kv.SetUint(n)
default:
panic("json: Unexpected key type") // should never occur
}
@@ -950,12 +935,12 @@ func (d *decodeState) convertNumber(s string) (any, error) {
f, err := strconv.ParseFloat(s, 64)
if err != nil {
- return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)}
+ return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeFor[float64](), Offset: int64(d.off)}
}
return f, nil
}
-var numberType = reflect.TypeOf(Number(""))
+var numberType = reflect.TypeFor[Number]()
// literalStore decodes a literal stored in item into v.
//
@@ -965,7 +950,7 @@ var numberType = reflect.TypeOf(Number(""))
func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error {
// Check for unmarshaler.
if len(item) == 0 {
- //Empty string given
+ // Empty string given.
d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
return nil
}
@@ -1012,7 +997,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
switch v.Kind() {
case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice:
- v.Set(reflect.Zero(v.Type()))
+ v.SetZero()
// otherwise, ignore null for primitives/string
}
case 't', 'f': // true, false
@@ -1064,10 +1049,11 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
v.SetBytes(b[:n])
case reflect.String:
- if v.Type() == numberType && !isValidNumber(string(s)) {
+ t := string(s)
+ if v.Type() == numberType && !isValidNumber(t) {
return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)
}
- v.SetString(string(s))
+ v.SetString(t)
case reflect.Interface:
if v.NumMethod() == 0 {
v.Set(reflect.ValueOf(string(s)))
@@ -1083,13 +1069,12 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
panic(phasePanicMsg)
}
- s := string(item)
switch v.Kind() {
default:
if v.Kind() == reflect.String && v.Type() == numberType {
// s must be a valid number, because it's
// already been tokenized.
- v.SetString(s)
+ v.SetString(string(item))
break
}
if fromQuoted {
@@ -1097,7 +1082,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
case reflect.Interface:
- n, err := d.convertNumber(s)
+ n, err := d.convertNumber(string(item))
if err != nil {
d.saveError(err)
break
@@ -1109,25 +1094,25 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
v.Set(reflect.ValueOf(n))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- n, err := strconv.ParseInt(s, 10, 64)
+ n, err := strconv.ParseInt(string(item), 10, 64)
if err != nil || v.OverflowInt(n) {
- d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
break
}
v.SetInt(n)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- n, err := strconv.ParseUint(s, 10, 64)
+ n, err := strconv.ParseUint(string(item), 10, 64)
if err != nil || v.OverflowUint(n) {
- d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
break
}
v.SetUint(n)
case reflect.Float32, reflect.Float64:
- n, err := strconv.ParseFloat(s, v.Type().Bits())
+ n, err := strconv.ParseFloat(string(item), v.Type().Bits())
if err != nil || v.OverflowFloat(n) {
- d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
break
}
v.SetFloat(n)
@@ -1140,7 +1125,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
// in an empty interface. They are not strictly necessary,
// but they avoid the weight of reflection in this common case.
-// valueInterface is like value but returns interface{}
+// valueInterface is like value but returns any.
func (d *decodeState) valueInterface() (val any) {
switch d.opcode {
default:
@@ -1157,7 +1142,7 @@ func (d *decodeState) valueInterface() (val any) {
return
}
-// arrayInterface is like array but returns []interface{}.
+// arrayInterface is like array but returns []any.
func (d *decodeState) arrayInterface() []any {
origStrictFieldStackLen := len(d.strictFieldStack)
defer func() {
@@ -1192,7 +1177,7 @@ func (d *decodeState) arrayInterface() []any {
return v
}
-// objectInterface is like object but returns map[string]interface{}.
+// objectInterface is like object but returns map[string]any.
func (d *decodeState) objectInterface() map[string]any {
origStrictFieldStackLen := len(d.strictFieldStack)
defer func() {
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
index 5b67251fbb..4e3a1a2f10 100644
--- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
@@ -12,12 +12,13 @@ package json
import (
"bytes"
+ "cmp"
"encoding"
"encoding/base64"
"fmt"
"math"
"reflect"
- "sort"
+ "slices"
"strconv"
"strings"
"sync"
@@ -28,29 +29,30 @@ import (
// Marshal returns the JSON encoding of v.
//
// Marshal traverses the value v recursively.
-// If an encountered value implements the Marshaler interface
-// and is not a nil pointer, Marshal calls its MarshalJSON method
-// to produce JSON. If no MarshalJSON method is present but the
-// value implements encoding.TextMarshaler instead, Marshal calls
-// its MarshalText method and encodes the result as a JSON string.
+// If an encountered value implements [Marshaler]
+// and is not a nil pointer, Marshal calls [Marshaler.MarshalJSON]
+// to produce JSON. If no [Marshaler.MarshalJSON] method is present but the
+// value implements [encoding.TextMarshaler] instead, Marshal calls
+// [encoding.TextMarshaler.MarshalText] and encodes the result as a JSON string.
// The nil pointer exception is not strictly necessary
// but mimics a similar, necessary exception in the behavior of
-// UnmarshalJSON.
+// [Unmarshaler.UnmarshalJSON].
//
// Otherwise, Marshal uses the following type-dependent default encodings:
//
// Boolean values encode as JSON booleans.
//
-// Floating point, integer, and Number values encode as JSON numbers.
+// Floating point, integer, and [Number] values encode as JSON numbers.
+// NaN and +/-Inf values will return an [UnsupportedValueError].
//
// String values encode as JSON strings coerced to valid UTF-8,
// replacing invalid bytes with the Unicode replacement rune.
// So that the JSON will be safe to embed inside HTML